/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
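/*
 * Typical usage (an illustrative sketch, not code taken from this file):
 * the bus or machine code for a DMA-limited device registers it with
 * dmabounce and supplies the dma_needs_bounce() predicate consulted by
 * map_single() below.  The buffer sizes and the 64MB window here are
 * hypothetical examples only.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > SZ_64M;	// hypothetical DMA window
 *	}
 *
 * dmabounce_unregister_dev() undoes the registration when the device
 * goes away.
 */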
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
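/*
 * DO_STATS() compiles away unless STATS is defined, so the counters below
 * and the "dmabounce_stats" sysfs attribute cost nothing in normal builds.
 */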
/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;
	rwlock_t lock;
};
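/*
 * Each mapping that had to be bounced is tracked by a struct safe_buffer
 * on the per-device safe_buffers list, protected by the rwlock.  Requests
 * up to small.size or large.size bytes are served from the corresponding
 * dma_pool; anything larger falls back to dma_alloc_coherent().
 */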
#ifdef STATS

static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);

#endif
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);
	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}
	read_unlock_irqrestore(&device_info->lock, flags);

	return rb;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);
	list_del(&buf->node);
	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}
/* ************************************************** */

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
	   enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}
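	/*
	 * Worked example (illustrative values): with a 26-bit mask of
	 * 0x03ffffff, ~mask is 0xfc000000.  A buffer at dma_addr 0x03fff000
	 * of size 0x2000 ends at 0x04000fff, so
	 * (0x03fff000 | 0x04000fff) & 0xfc000000 is non-zero and the buffer
	 * must be bounced; a buffer lying entirely below 64MB leaves
	 * needs_bounce at zero.
	 */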
	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		dma_cache_maint(ptr, size, dir);
	}

	return dma_addr;
}
static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	     enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(device_info, buf);
	}
}
static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		       enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from the original code need to be
		 * commented out because some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. A good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. There is no point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one direction at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */
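		/*
		 * Illustrative case (1) above: a driver that mapped one big
		 * ring buffer may sync just the descriptor it is about to
		 * read, e.g.
		 *
		 *	dma_sync_single_range_for_cpu(dev, ring_handle,
		 *			desc * DESC_SIZE, DESC_SIZE,
		 *			DMA_FROM_DEVICE);
		 *
		 * where ring_handle, desc and DESC_SIZE are the driver's own
		 * (hypothetical) names, so size here can legitimately be
		 * smaller than buf->size.
		 */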
357 "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
358 __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
359 buf->safe, buf->safe_dma_addr);
361 DO_STATS ( device_info->bounce_count++ );
364 case DMA_FROM_DEVICE:
366 "%s: copy back safe %p to unsafe %p size %d\n",
367 __func__, buf->safe, buf->ptr, size);
368 memcpy(buf->ptr, buf->safe, size);
372 "%s: copy out unsafe %p to safe %p, size %d\n",
373 __func__,buf->ptr, buf->safe, size);
374 memcpy(buf->safe, buf->ptr, size);
376 case DMA_BIDIRECTIONAL:
377 BUG(); /* is this allowed? what does it mean? */
382 * No need to sync the safe buffer - it was allocated
383 * via the coherent allocators.
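/*
 * sync_single() returns 0 when the address belonged to a bounce buffer
 * (the copy above already did the work) and 1 when it did not, in which
 * case the dma_sync_single_range_* wrappers below fall back to ordinary
 * cache maintenance on the original buffer.
 */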
/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	dma_addr = map_single(dev, ptr, size, dir);

	return dma_addr;
}
dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(dir == DMA_NONE);

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);
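/*
 * Note: page_address() is only meaningful for lowmem pages, so
 * dma_map_page() above and dma_map_sg() below assume the buffers being
 * mapped are not in highmem.
 */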
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
			enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	unmap_single(dev, dma_addr, size, dir);
}
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sg, s, nents, i) {
		struct page *page = sg_page(s);
		unsigned int offset = s->offset;
		unsigned int length = s->length;
		void *ptr = page_address(page) + offset;

		s->dma_address = map_single(dev, ptr, length, dir);
	}

	return nents;
}
void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sg, s, nents, i) {
		dma_addr_t dma_addr = s->dma_address;
		unsigned int length = s->length;

		unmap_single(dev, dma_addr, length, dir);
	}
}
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_addr,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
		__func__, dma_addr, offset, size, dir);

	if (sync_single(dev, dma_addr, offset + size, dir))
		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_addr,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
		__func__, dma_addr, offset, size, dir);

	if (sync_single(dev, dma_addr, offset + size, dir))
		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);
void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sg, s, nents, i) {
		dma_addr_t dma_addr = s->dma_address;
		unsigned int length = s->length;

		sync_single(dev, dma_addr, length, dir);
	}
}
void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sg, s, nents, i) {
		dma_addr_t dma_addr = s->dma_address;
		unsigned int length = s->length;

		sync_single(dev, dma_addr, length, dir);
	}
}
static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
		    unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}
int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
			unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting"
			 " to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");