static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
- u64 mask = *dev->dma_mask;
- int high = addr + size > mask;
- int mmu = high;
-
- if (force_iommu)
- mmu = 1;
-
- return mmu;
+ return force_iommu ||
+ !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}
static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
- u64 mask = *dev->dma_mask;
- int high = addr + size > mask;
- int mmu = high;
-
- return mmu;
+ return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}
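/*
 * Not part of the patch: a minimal sketch of the helper that need_iommu()
 * and nonforced_iommu() now call.  Its behaviour follows from the
 * open-coded "addr + size > mask" test being removed above; the exact
 * in-tree definition (argument types, inclusive-mask handling) may differ.
 */
static inline int is_buffer_dma_capable(u64 mask, u64 addr, size_t size)
{
	/* Capable iff the whole buffer lies within the device's DMA mask,
	 * mirroring the check the two helpers above used to open-code. */
	return addr + size <= mask;
}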
/* Map a single continuous physical area into the IOMMU. */
	/* ... body of dma_map_area() elided ... */
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
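/*
 * Illustration only (made-up numbers): how dma_map_area()'s return value
 * above is composed.  With 4 KiB pages, an aperture at bus address
 * 0x80000000, IOMMU page slot 5 and a buffer starting 0x234 bytes into
 * its page:
 *
 *	iommu_bus_base + iommu_page * PAGE_SIZE + (phys_mem & ~PAGE_MASK)
 *	= 0x80000000   + 5 * 4096               + 0x234
 *	= 0x80005234
 */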
-static dma_addr_t
-gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
-{
- dma_addr_t map;
- unsigned long align_mask;
-
- align_mask = (1UL << get_order(size)) - 1;
- map = dma_map_area(dev, paddr, size, dir, align_mask);
-
- flush_gart();
-
- return map;
-}
-
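/*
 * Note, not part of the patch: the align_mask that gart_map_simple()
 * computed (and that gart_alloc_coherent() below now computes itself)
 * requests a naturally aligned mapping.  For example, a 16 KiB buffer
 * gives get_order(16384) == 2, so align_mask == (1UL << 2) - 1 == 3 and
 * dma_map_area() is asked to place the mapping on a four-page (16 KiB)
 * boundary inside the aperture.
 */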
/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)

static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
void *vaddr;
+ dma_addr_t paddr;
+ unsigned long align_mask;
+ u64 dma_mask = dma_alloc_coherent_mask(dev, flag);
vaddr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
if (!vaddr)
return NULL;
- *dma_addr = gart_map_single(dev, __pa(vaddr), size, DMA_BIDIRECTIONAL);
+ paddr = virt_to_phys(vaddr);
+ if (is_buffer_dma_capable(dma_mask, paddr, size)) {
+ *dma_addr = paddr;
+ return vaddr;
+ }
+
+ align_mask = (1UL << get_order(size)) - 1;
+
+ *dma_addr = dma_map_area(dev, paddr, size, DMA_BIDIRECTIONAL,
+ align_mask);
+ flush_gart();
+
if (*dma_addr != bad_dma_address)
return vaddr;
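/*
 * Not part of the patch: dma_alloc_coherent_mask(), used above to decide
 * whether the freshly allocated pages are already reachable by the device,
 * is assumed to resolve roughly as sketched below -- the device's
 * coherent_dma_mask with a GFP-zone based fallback.  The real x86 helper
 * may differ in detail.
 */
static inline u64 dma_alloc_coherent_mask(struct device *dev, gfp_t gfp)
{
	u64 dma_mask = dev->coherent_dma_mask;

	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}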
* Out of IOMMU space handling.
* Reserve some invalid pages at the beginning of the GART.
*/
- set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+ iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
agp_memory_reserved = iommu_size;
printk(KERN_INFO