/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>

#include <asm/io.h>
17 /* For i386, we make it point to the NULL address */
18 dma_addr_t bad_dma_address __read_mostly = 0x0;
19 EXPORT_SYMBOL(bad_dma_address);
21 struct dma_coherent_mem {
26 unsigned long *bitmap;
29 void *dma_alloc_coherent(struct device *dev, size_t size,
30 dma_addr_t *dma_handle, gfp_t gfp)
33 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
34 int order = get_order(size);
35 /* ignore region specifiers */
36 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
39 int page = bitmap_find_free_region(mem->bitmap, mem->size,
42 *dma_handle = mem->device_base + (page << PAGE_SHIFT);
43 ret = mem->virt_base + (page << PAGE_SHIFT);
47 if (mem->flags & DMA_MEMORY_EXCLUSIVE)
51 if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
54 ret = (void *)__get_free_pages(gfp, order);
58 *dma_handle = virt_to_phys(ret);
62 EXPORT_SYMBOL(dma_alloc_coherent);
64 void dma_free_coherent(struct device *dev, size_t size,
65 void *vaddr, dma_addr_t dma_handle)
67 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
68 int order = get_order(size);
70 WARN_ON(irqs_disabled()); /* for portability */
71 if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
72 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
74 bitmap_release_region(mem->bitmap, page, order);
76 free_pages((unsigned long)vaddr, order);
78 EXPORT_SYMBOL(dma_free_coherent);
80 int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
81 dma_addr_t device_addr, size_t size, int flags)
83 void __iomem *mem_base = NULL;
84 int pages = size >> PAGE_SHIFT;
85 int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
87 if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
94 /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
96 mem_base = ioremap(bus_addr, size);
100 dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
103 dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
104 if (!dev->dma_mem->bitmap)
107 dev->dma_mem->virt_base = mem_base;
108 dev->dma_mem->device_base = device_addr;
109 dev->dma_mem->size = pages;
110 dev->dma_mem->flags = flags;
112 if (flags & DMA_MEMORY_MAP)
113 return DMA_MEMORY_MAP;
115 return DMA_MEMORY_IO;
124 EXPORT_SYMBOL(dma_declare_coherent_memory);
126 void dma_release_declared_memory(struct device *dev)
128 struct dma_coherent_mem *mem = dev->dma_mem;
133 iounmap(mem->virt_base);
137 EXPORT_SYMBOL(dma_release_declared_memory);
139 void *dma_mark_declared_memory_occupied(struct device *dev,
140 dma_addr_t device_addr, size_t size)
142 struct dma_coherent_mem *mem = dev->dma_mem;
143 int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
147 return ERR_PTR(-EINVAL);
149 pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
150 err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
153 return mem->virt_base + (pos << PAGE_SHIFT);
155 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);