#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_
/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
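
/*
 * Per-implementation DMA operations.  Each backend (e.g. nommu, swiotlb,
 * or a hardware IOMMU driver) provides one of these tables; the generic
 * dma_* helpers below simply dispatch through it.
 */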
struct dma_mapping_ops {
	int (*mapping_error)(struct device *dev,
			dma_addr_t dma_addr);
	void* (*alloc_coherent)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp);
	void (*free_coherent)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
			size_t size, int direction);
	void (*unmap_single)(struct device *dev, dma_addr_t addr,
			size_t size, int direction);
	void (*sync_single_for_cpu)(struct device *hwdev,
			dma_addr_t dma_handle, size_t size,
			int direction);
	void (*sync_single_for_device)(struct device *hwdev,
			dma_addr_t dma_handle, size_t size,
			int direction);
	void (*sync_single_range_for_cpu)(struct device *hwdev,
			dma_addr_t dma_handle, unsigned long offset,
			size_t size, int direction);
	void (*sync_single_range_for_device)(struct device *hwdev,
			dma_addr_t dma_handle, unsigned long offset,
			size_t size, int direction);
	void (*sync_sg_for_cpu)(struct device *hwdev,
			struct scatterlist *sg, int nelems,
			int direction);
	void (*sync_sg_for_device)(struct device *hwdev,
			struct scatterlist *sg, int nelems,
			int direction);
	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
			int nents, int direction);
	void (*unmap_sg)(struct device *hwdev,
			struct scatterlist *sg, int nents,
			int direction);
	int (*dma_supported)(struct device *hwdev, u64 mask);
};

extern struct dma_mapping_ops *dma_ops;
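
/*
 * Return the ops table to use for @dev: the per-device table if one was
 * installed in dev->archdata, otherwise the global dma_ops selected at boot.
 */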
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	return dev->archdata.dma_ops;
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}
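
/*
 * x86 DMA is cache-coherent, so "non-coherent" allocations can simply
 * reuse the coherent allocator and dma_is_consistent() is always true.
 */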
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
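
/*
 * Streaming mapping helpers.  Each one validates the DMA direction and
 * then dispatches to the active dma_mapping_ops implementation.
 */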
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
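
/*
 * The dma_sync_* helpers hand a streaming mapping back to the CPU or to
 * the device.  flush_write_buffers() makes sure any posted CPU writes are
 * drained before the device is allowed to look at the buffer.
 */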
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}
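
/*
 * Page-based mapping: compute the address straight from the page's
 * physical address plus offset, then go through the same map_single hook.
 */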
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}
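
/*
 * Caches are coherent with DMA on x86, so syncing a consistent buffer
 * only needs to drain the CPU write buffers.
 */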
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get the cache size on all x86, so return the
	 * maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}
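
/*
 * Pick the mask used to satisfy a coherent allocation: the device's
 * coherent_dma_mask if set, otherwise 24 or 32 bits depending on the
 * caller's gfp flags.
 */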
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

	return gfp;
}
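
/*
 * dma_alloc_coherent(): try the device's dedicated coherent memory pool
 * first, fall back to the global x86_dma_fallback_dev when no device was
 * given, and finally let the active implementation allocate with gfp
 * flags adjusted to fit the device's coherent DMA mask.
 */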
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}
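
/*
 * Free a coherent allocation.  Must not be called with interrupts
 * disabled (the WARN_ON keeps drivers portable to architectures where
 * freeing can sleep); buffers from the per-device coherent pool are
 * released there instead of going through the ops table.
 */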
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_DMA_MAPPING_H_ */