/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 *  DMA windows, will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
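/*
 * Typical usage (an illustrative sketch, not lifted from any particular
 * board file): platform code registers the device once at init time and
 * drivers then use the ordinary streaming DMA API; any bouncing happens
 * transparently inside dma_map_single()/dma_unmap_single().  The pool
 * sizes here are arbitrary example values:
 *
 *      err = dmabounce_register_dev(dev, SZ_1K, SZ_64K);
 *      ...
 *      dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ...
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *      dmabounce_unregister_dev(dev);
 */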

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

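/*
 * DO_STATS(x) expands to the statement 'x' when STATS is defined and to
 * nothing otherwise, so the statistics counters can be updated from the
 * mapping fast paths at no cost in a normal build.
 */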
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
        struct list_head node;

        /* original request */
        void            *ptr;
        size_t          size;
        int             direction;

        /* safe buffer info */
        struct dmabounce_pool *pool;
        void            *safe;
        dma_addr_t      safe_dma_addr;
};

struct dmabounce_pool {
        unsigned long   size;
        struct dma_pool *pool;
#ifdef STATS
        unsigned long   allocs;
#endif
};

struct dmabounce_device_info {
        struct device *dev;
        struct list_head safe_buffers;
#ifdef STATS
        unsigned long total_allocs;
        unsigned long map_op_count;
        unsigned long bounce_count;
        int attr_res;
#endif
        struct dmabounce_pool   small;
        struct dmabounce_pool   large;

        rwlock_t lock;          /* protects the safe_buffers list */
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
                device_info->small.allocs,
                device_info->large.allocs,
                device_info->total_allocs - device_info->small.allocs -
                        device_info->large.allocs,
                device_info->total_allocs,
                device_info->map_op_count,
                device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
                  size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
        unsigned long flags;

        dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
                __func__, ptr, size, dir);

        if (size <= device_info->small.size) {
                pool = &device_info->small;
        } else if (size <= device_info->large.size) {
                pool = &device_info->large;
        } else {
                pool = NULL;
        }

        buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
        if (buf == NULL) {
                dev_warn(dev, "%s: kmalloc failed\n", __func__);
                return NULL;
        }

        buf->ptr = ptr;
        buf->size = size;
        buf->direction = dir;
        buf->pool = pool;

        if (pool) {
                buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
                                           &buf->safe_dma_addr);
        } else {
                buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
                                               GFP_ATOMIC);
        }

        if (buf->safe == NULL) {
                dev_warn(dev,
                         "%s: could not alloc dma memory (size=%d)\n",
                         __func__, size);
                kfree(buf);
                return NULL;
        }

#ifdef STATS
        if (pool)
                pool->allocs++;
        device_info->total_allocs++;
#endif

        write_lock_irqsave(&device_info->lock, flags);
        list_add(&buf->node, &device_info->safe_buffers);
        write_unlock_irqrestore(&device_info->lock, flags);

        return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
        struct safe_buffer *b, *rb = NULL;
        unsigned long flags;

        read_lock_irqsave(&device_info->lock, flags);

        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr == safe_dma_addr) {
                        rb = b;
                        break;
                }

        read_unlock_irqrestore(&device_info->lock, flags);
        return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
        unsigned long flags;

        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

        write_lock_irqsave(&device_info->lock, flags);

        list_del(&buf->node);

        write_unlock_irqrestore(&device_info->lock, flags);

        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                    buf->safe_dma_addr);

        kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
                dma_addr_t dma_addr, const char *where)
{
        if (!dev || !dev->archdata.dmabounce)
                return NULL;
        if (dma_mapping_error(dev, dma_addr)) {
                dev_err(dev, "Trying to %s invalid mapping\n", where);
                return NULL;
        }
        return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

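/*
 * Map a single buffer for streaming DMA.  If the buffer cannot be
 * addressed by the device (according to its dma_mask or the platform's
 * dma_needs_bounce() hook), the data is copied into a newly allocated
 * safe buffer and the safe buffer's DMA address is returned; otherwise
 * only the CPU cache maintenance needed for a streaming mapping is done.
 */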
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        dma_addr_t dma_addr;
        int needs_bounce = 0;

        if (device_info)
                DO_STATS ( device_info->map_op_count++ );

        dma_addr = virt_to_dma(dev, ptr);

        if (dev->dma_mask) {
                unsigned long mask = *dev->dma_mask;
                unsigned long limit;

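                /*
                 * (mask + 1) & ~mask is the size of the addressable
                 * window for the usual 2^n - 1 style masks that fit in
                 * 32 bits: e.g. a 26-bit mask of 0x03ffffff gives a
                 * 64MB limit.  For a full 32-bit mask the expression
                 * wraps to 0 and the size check below is skipped.
                 */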
                limit = (mask + 1) & ~mask;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#x "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return ~0;
                }

                /*
                 * Figure out if we need to bounce from the DMA mask.
                 */
                needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
        }

        if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
                struct safe_buffer *buf;

                buf = alloc_safe_buffer(device_info, ptr, size, dir);
                if (buf == 0) {
                        dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                               __func__, ptr);
                        return 0;
                }

                dev_dbg(dev,
                        "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                        __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                        buf->safe, buf->safe_dma_addr);

                if ((dir == DMA_TO_DEVICE) ||
                    (dir == DMA_BIDIRECTIONAL)) {
                        dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
                                __func__, ptr, buf->safe, size);
                        memcpy(buf->safe, ptr, size);
                }
                ptr = buf->safe;

                dma_addr = buf->safe_dma_addr;
        } else {
                /*
                 * The buffer is within the device's DMA window, so no
                 * bounce buffer is needed; just perform the CPU cache
                 * maintenance required for a streaming mapping.
                 */
                dma_cache_maint(ptr, size, dir);
        }

        return dma_addr;
}

static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

        if (buf) {
                BUG_ON(buf->size != size);
                BUG_ON(buf->direction != dir);

                dev_dbg(dev,
                        "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                        __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                        buf->safe, buf->safe_dma_addr);

                DO_STATS(dev->archdata.dmabounce->bounce_count++);

                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                        void *ptr = buf->ptr;

                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %d\n",
                                __func__, buf->safe, ptr, size);
                        memcpy(ptr, buf->safe, size);

                        /*
                         * DMA buffers must have the same cache properties
                         * as if they were really used for DMA - which means
                         * data must be written back to RAM.  Note that
                         * we don't use dmac_flush_range() here for the
                         * bidirectional case because we know the cache
                         * lines will be coherent with the data written.
                         */
                        dmac_clean_range(ptr, ptr + size);
                        outer_clean_range(__pa(ptr), __pa(ptr) + size);
                }
                free_safe_buffer(dev->archdata.dmabounce, buf);
        }
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, ptr, size, dir);

        BUG_ON(!valid_dma_direction(dir));

        return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
                __func__, page, offset, size, dir);

        BUG_ON(!valid_dma_direction(dir));

        return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);

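/*
 * Partial-sync helpers called from the architecture's dma_sync_single_*
 * implementations.  They return 0 when the address belongs to a bounce
 * buffer and the copy has been done here, and non-zero when the mapping
 * was not bounced, in which case the caller falls back to ordinary
 * cache maintenance on the original buffer.
 */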
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
                unsigned long off, size_t sz, enum dma_data_direction dir)
{
        struct safe_buffer *buf;

        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                __func__, addr, off, sz, dir);

        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);

        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
                        __func__, buf->safe + off, buf->ptr + off, sz);
                memcpy(buf->ptr + off, buf->safe + off, sz);
        }
        return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
                unsigned long off, size_t sz, enum dma_data_direction dir)
{
        struct safe_buffer *buf;

        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                __func__, addr, off, sz, dir);

        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);

        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
                dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
                        __func__, buf->ptr + off, buf->safe + off, sz);
                memcpy(buf->safe + off, buf->ptr + off, sz);
        }
        return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
                const char *name, unsigned long size)
{
        pool->size = size;
        DO_STATS(pool->allocs = 0);
        pool->pool = dma_pool_create(name, dev, size,
                                     0 /* byte alignment */,
                                     0 /* no page-crossing issues */);

        return pool->pool ? 0 : -ENOMEM;
}

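/*
 * Register a device with dmabounce.  Typically called from platform or
 * bus code when the device is created.  small_buffer_size and
 * large_buffer_size set up two dma_pools used for bounce buffers; any
 * request larger than both falls back to dma_alloc_coherent().  The
 * platform must also supply dma_needs_bounce(), which map_single() uses
 * to decide whether a given address range requires bouncing.
 */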
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                unsigned long large_buffer_size)
{
        struct dmabounce_device_info *device_info;
        int ret;

        device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
        if (!device_info) {
                dev_err(dev,
                        "Could not allocate dmabounce_device_info\n");
                return -ENOMEM;
        }

        ret = dmabounce_init_pool(&device_info->small, dev,
                                  "small_dmabounce_pool", small_buffer_size);
        if (ret) {
                dev_err(dev,
                        "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                        small_buffer_size);
                goto err_free;
        }

        if (large_buffer_size) {
                ret = dmabounce_init_pool(&device_info->large, dev,
                                          "large_dmabounce_pool",
                                          large_buffer_size);
                if (ret) {
                        dev_err(dev,
                                "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                                large_buffer_size);
                        goto err_destroy;
                }
        }

        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
        rwlock_init(&device_info->lock);

#ifdef STATS
        device_info->total_allocs = 0;
        device_info->map_op_count = 0;
        device_info->bounce_count = 0;
        device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

        dev->archdata.dmabounce = device_info;

        dev_info(dev, "dmabounce: registered device\n");

        return 0;

 err_destroy:
        dma_pool_destroy(device_info->small.pool);
 err_free:
        kfree(device_info);
        return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

        dev->archdata.dmabounce = NULL;

        if (!device_info) {
                dev_warn(dev,
                         "Never registered with dmabounce but attempting "
                         "to unregister!\n");
                return;
        }

        if (!list_empty(&device_info->safe_buffers)) {
                dev_err(dev,
                        "Removing from dmabounce with pending buffers!\n");
                BUG();
        }

        if (device_info->small.pool)
                dma_pool_destroy(device_info->small.pool);
        if (device_info->large.pool)
                dma_pool_destroy(device_info->large.pool);

#ifdef STATS
        if (device_info->attr_res == 0)
                device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

        kfree(device_info);

        dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");