#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mm.h>		/* need struct page */
#include <linux/device.h>

#include <asm/scatterlist.h>
/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h.
 */
extern void consistent_sync(void *kaddr, size_t size, int rw);
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return dev->dma_mask && *dev->dma_mask != 0;
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
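
/*
 * Example: restricting a device to 24-bit DMA addressing, matching the
 * mask described above.  An illustrative sketch only; mydev_probe() is
 * a hypothetical driver probe function.
 *
 *	static int mydev_probe(struct device *dev)
 *	{
 *		if (dma_set_mask(dev, 0x00ffffff))
 *			return -EIO;
 *		return 0;
 *	}
 */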
static inline int dma_get_cache_alignment(void)
{
	return 32;
}
static inline int dma_is_consistent(dma_addr_t handle)
{
	return 0;
}
/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
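
/*
 * Example: allocating and freeing a coherent buffer.  An illustrative
 * sketch only; "dev" is assumed to be a valid struct device pointer
 * and BUF_SIZE is a hypothetical driver-defined constant.
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, BUF_SIZE, &dma, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	... give "dma" to the device, access the buffer via "cpu" ...
 *	dma_free_coherent(dev, BUF_SIZE, cpu, dma);
 */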
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are illegal once this call has started executing.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle);
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size);
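
/*
 * Example: exporting a coherent buffer to user space from a driver's
 * mmap file operation.  An illustrative sketch only; "struct mydrv"
 * and its fields are hypothetical, and the buffer must stay allocated
 * until the user mapping is gone.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
 *					 drv->handle, drv->size);
 *	}
 */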
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)
int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size);
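
/*
 * Example: a write-combined allocation, as typically used for LCD
 * frame buffer memory on ARM.  An illustrative sketch only; "fbi" and
 * its fields are hypothetical.
 *
 *	fbi->screen_base = dma_alloc_writecombine(dev, fbi->map_size,
 *						  &fbi->map_dma, GFP_KERNEL);
 */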
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	consistent_sync(cpu_addr, size, dir);
	return virt_to_dma(dev, (unsigned long)cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
#endif
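
/*
 * Example: a single streaming mapping for a device-bound transfer,
 * checked with the all-bits-set error convention via
 * dma_mapping_error().  An illustrative sketch only; "buf" and "len"
 * are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dma))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */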
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}
/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
		 enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif
/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}
/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt;

		sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
		virt = page_address(sg->page) + sg->offset;
		consistent_sync(virt, sg->length, dir);
	}

	return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
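
/*
 * Example: mapping a scatterlist and programming the device with each
 * resulting address/length pair.  An illustrative sketch only;
 * mydev_load_desc() is a hypothetical device-specific helper.  Note
 * that the device must be programmed with the count returned by
 * dma_map_sg(), which may be smaller than nents.
 *
 *	int i, count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *
 *	for (i = 0; i < count; i++)
 *		mydev_load_desc(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 */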
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
/**
 * dma_sync_single_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to sync
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so.  At the
 * next point you give the DMA address back to the device, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}
#else
extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
#endif
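
/*
 * Example: inspecting a streaming buffer between transfers without
 * tearing down the mapping.  An illustrative sketch only; "dma" and
 * "len" come from an earlier dma_map_single() call.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... examine the buffer contents with the CPU ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device again owns the buffer ...
 */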
/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to sync
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = page_address(sg->page) + sg->offset;
		consistent_sync(virt, sg->length, dir);
	}
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = page_address(sg->page) + sg->offset;
		consistent_sync(virt, sg->length, dir);
	}
}
#else
extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
#endif
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */
/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);
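
/*
 * Example: platform code registering a device for bouncing with a
 * small-buffer pool of 2KB buffers and a large-buffer pool of 4KB
 * buffers.  An illustrative sketch only; the pool sizes are arbitrary.
 *
 *	if (dmabounce_register_dev(dev, 2048, 4096))
 *		printk(KERN_ERR "failed to register with dmabounce\n");
 *	...
 *	dmabounce_unregister_dev(dev);
 */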
/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
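
/*
 * Example: a platform implementation that bounces any buffer ending
 * beyond a 64MB inbound window at the base of RAM.  An illustrative
 * sketch only; the window base and size are hypothetical, and assume
 * DMA addresses equal physical addresses on this platform.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t dma, size_t size)
 *	{
 *		return (dma + size) > (PHYS_OFFSET + SZ_64M);
 *	}
 */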
#endif /* CONFIG_DMABOUNCE */

#endif /* __KERNEL__ */
#endif