Source: linux-2.6-omap-h63xx.git (www.pilppa.org Git mirror)
Commit subject: "x86: isolate coherent mapping functions"
Path: arch/x86/kernel/pci-dma_32.c
1 /*
2  * Dynamic DMA mapping support.
3  *
4  * On i386 there is no hardware dynamic DMA address translation,
5  * so consistent alloc/free are merely page allocation/freeing.
6  * The rest of the dynamic DMA mapping interface is implemented
7  * in asm/pci.h.
8  */
9
10 #include <linux/types.h>
11 #include <linux/mm.h>
12 #include <linux/string.h>
13 #include <linux/pci.h>
14 #include <linux/module.h>
15 #include <asm/io.h>
16
/*
 * For i386, we make it point to the NULL address: the sentinel bus
 * address the DMA API hands back when a mapping fails.  NOTE(review):
 * presumably compared against by dma_mapping_error() — confirm in the
 * matching header.
 */
dma_addr_t bad_dma_address __read_mostly = 0x0;
EXPORT_SYMBOL(bad_dma_address);
20
21 static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
22                                        dma_addr_t *dma_handle, void **ret)
23 {
24         struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
25         int order = get_order(size);
26
27         if (mem) {
28                 int page = bitmap_find_free_region(mem->bitmap, mem->size,
29                                                      order);
30                 if (page >= 0) {
31                         *dma_handle = mem->device_base + (page << PAGE_SHIFT);
32                         *ret = mem->virt_base + (page << PAGE_SHIFT);
33                         memset(*ret, 0, size);
34                 }
35                 if (mem->flags & DMA_MEMORY_EXCLUSIVE)
36                         *ret = NULL;
37         }
38         return (mem != NULL);
39 }
40
41 static int dma_release_coherent(struct device *dev, int order, void *vaddr)
42 {
43         struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
44
45         if (mem && vaddr >= mem->virt_base && vaddr <
46                    (mem->virt_base + (mem->size << PAGE_SHIFT))) {
47                 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
48
49                 bitmap_release_region(mem->bitmap, page, order);
50                 return 1;
51         }
52         return 0;
53 }
54
55 void *dma_alloc_coherent(struct device *dev, size_t size,
56                            dma_addr_t *dma_handle, gfp_t gfp)
57 {
58         void *ret = NULL;
59         int order = get_order(size);
60         /* ignore region specifiers */
61         gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
62
63         if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
64                 return ret;
65
66         if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
67                 gfp |= GFP_DMA;
68
69         ret = (void *)__get_free_pages(gfp, order);
70
71         if (ret != NULL) {
72                 memset(ret, 0, size);
73                 *dma_handle = virt_to_phys(ret);
74         }
75         return ret;
76 }
77 EXPORT_SYMBOL(dma_alloc_coherent);
78
79 void dma_free_coherent(struct device *dev, size_t size,
80                          void *vaddr, dma_addr_t dma_handle)
81 {
82         int order = get_order(size);
83
84         WARN_ON(irqs_disabled());       /* for portability */
85         if (dma_release_coherent(dev, order, vaddr))
86                 return;
87         free_pages((unsigned long)vaddr, order);
88 }
89 EXPORT_SYMBOL(dma_free_coherent);