#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;
struct dma_mapping_ops {
	int             (*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*           (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t      (*map_simple)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void            (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void            (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int             (*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};
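/*
 * Example: the shape of a minimal, hypothetical 1:1 ("no-IOMMU")
 * implementation of this table. This is an illustrative sketch, not the
 * kernel's actual nommu code. Note that ->map_single and ->map_sg are
 * called unconditionally by the inline wrappers below, so they must be
 * set; most other hooks are optional and may be left NULL.
 *
 *	static dma_addr_t flat_map_single(struct device *hwdev,
 *					  phys_addr_t paddr, size_t size,
 *					  int direction)
 *	{
 *		return paddr;	// bus address == physical address
 *	}
 *
 *	static struct dma_mapping_ops flat_dma_ops = {
 *		.map_single	= flat_map_single,
 *		.map_sg		= flat_map_sg,	// analogous per-entry loop
 *		.is_phys	= 1,
 *	};
 */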
extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	/* Use the per-device ops when set, the global ops otherwise. */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
#endif
}
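/*
 * Every bus address handed back by the dma_map_*() helpers below should
 * be checked with dma_mapping_error() before use, e.g.:
 *
 *	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -EIO;
 *
 * ("bus", "buf" and "len" are hypothetical driver-side names.)
 */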
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}
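/*
 * Example: a complete streaming-DMA round trip with the two helpers
 * above. A minimal sketch; "my_dev", "buf" and "len" are hypothetical.
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, bus))
 *		return -ENOMEM;
 *	... program the device with "bus" and wait for completion ...
 *	dma_unmap_single(my_dev, bus, len, DMA_TO_DEVICE);
 *
 * The size and direction passed to dma_unmap_single() must match the
 * ones passed to dma_map_single().
 */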
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
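/*
 * Example: mapping a driver-built scatterlist. A sketch with
 * hypothetical names ("my_dev", "sgl", "program_hw_entry").
 * dma_map_sg() may return fewer entries than it was given, because an
 * IOMMU can merge adjacent entries; a return of 0 means failure.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_entry(i, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original "nents", not the count
 * returned by dma_map_sg().
 */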
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}
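/*
 * Example: letting the CPU look at a long-lived streaming mapping
 * between device transfers. A minimal sketch with hypothetical names:
 *
 *	dma_sync_single_for_cpu(my_dev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer contents ...
 *	dma_sync_single_for_device(my_dev, bus, len, DMA_FROM_DEVICE);
 *
 * Between the _for_cpu and _for_device calls the buffer belongs to the
 * CPU; afterwards it belongs to the device again.
 */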
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}
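/*
 * Example: dma_map_page() is the way to map memory that has no
 * permanent kernel virtual address, e.g. a highmem page. A sketch
 * with hypothetical names:
 *
 *	dma_addr_t bus = dma_map_page(my_dev, page, offset, len,
 *				      DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, bus))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(my_dev, bus, len, DMA_TO_DEVICE);
 */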
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	/* x86 caches are coherent; flushing CPU write buffers is enough. */
	flush_write_buffers();
}
static inline int dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get the cache size on all x86, so return the
	 * maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}
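/*
 * Example: a driver can use the helper above to round a buffer size up
 * to a boundary that avoids cache-line sharing with neighbouring data
 * (sketch; "len" is hypothetical):
 *
 *	size_t aligned = ALIGN(len, dma_get_cache_alignment());
 */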
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (ops->alloc_coherent)
		return ops->alloc_coherent(dev, size,
				dma_handle, gfp);
	return NULL;
}
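/*
 * Example: allocating a coherent descriptor ring and releasing it on
 * teardown. A minimal sketch; "my_dev" and "ring_bytes" are
 * hypothetical. Coherent memory needs no dma_sync_*() calls: the CPU
 * and the device see each other's writes without ownership handoffs.
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(my_dev, ring_bytes, &ring_bus,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... give "ring_bus" to the device, use "ring" from the CPU ...
 *	dma_free_coherent(my_dev, ring_bytes, ring, ring_bus);
 */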
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif