swiotlb: convert swiotlb to use is_buffer_dma_capable helper function
Author:     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
AuthorDate: Tue, 9 Sep 2008 16:06:49 +0000 (01:06 +0900)
Commit:     Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 10 Sep 2008 09:33:45 +0000 (11:33 +0200)
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
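
For reference, a sketch of the helper being adopted. is_buffer_dma_capable() lives in <linux/dma-mapping.h>; as inferred from its call sites in the diff below, it verifies that the whole buffer, not just its first byte, fits under the device's DMA mask:

static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
{
	return addr + size <= mask;
}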
lib/swiotlb.c

index b5f5d11330420fb502ca13c2acba6848f1ca17a7..240a67c2c979f8f7f3230f27b28bc610288c373e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,13 @@ cleanup1:
 }
 
 static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
        dma_addr_t mask = 0xffffffff;
        /* If the device has a mask, use it, otherwise default to 32 bits */
        if (hwdev && hwdev->dma_mask)
                mask = *hwdev->dma_mask;
-       return (addr & ~mask) != 0;
+       return !is_buffer_dma_capable(mask, addr, size);
 }
 
 static int is_swiotlb_buffer(char *addr)
@@ -473,7 +473,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        int order = get_order(size);
 
        ret = (void *)__get_free_pages(flags, order);
-       if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+       if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
                /*
                 * The allocated memory isn't reachable by the device.
                 * Fall back on swiotlb_map_single().
@@ -497,7 +497,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        dev_addr = virt_to_bus(ret);
 
        /* Confirm address can be DMA'd by device */
-       if (address_needs_mapping(hwdev, dev_addr)) {
+       if (address_needs_mapping(hwdev, dev_addr, size)) {
                printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                       (unsigned long long)*hwdev->dma_mask,
                       (unsigned long long)dev_addr);
@@ -561,7 +561,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
-       if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+       if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
                return dev_addr;
 
        /*
@@ -578,7 +578,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
        /*
         * Ensure that the address returned is DMA'ble
         */
-       if (address_needs_mapping(hwdev, dev_addr))
+       if (address_needs_mapping(hwdev, dev_addr, size))
                panic("map_single: bounce buffer is not DMA'ble");
 
        return dev_addr;
@@ -721,7 +721,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
        for_each_sg(sgl, sg, nelems, i) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                dev_addr = virt_to_bus(addr);
-               if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+               if (swiotlb_force ||
+                   address_needs_mapping(hwdev, dev_addr, sg->length)) {
                        void *map = map_single(hwdev, addr, sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
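
The substance of the change: address_needs_mapping() previously tested only the buffer's start address against the mask, so a buffer that began below the DMA limit but extended past it was wrongly treated as reachable. A minimal userspace sketch of the case the size-aware check catches (illustrative values; uint64_t stands in for the kernel's dma_addr_t):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Assumed stand-in for the kernel helper; see the sketch above. */
static int is_buffer_dma_capable(uint64_t mask, uint64_t addr, size_t size)
{
	return addr + size <= mask;
}

int main(void)
{
	uint64_t mask = 0xffffffff;	/* default 32-bit DMA mask */
	uint64_t addr = 0xffffe000;	/* buffer starts below 4 GiB... */
	size_t size   = 0x4000;		/* ...but 16 KiB runs past it */

	/* Old test looked only at the start address: claims no mapping needed. */
	printf("old check, needs mapping: %d\n", (addr & ~mask) != 0);

	/* New test covers the whole buffer: correctly demands a bounce buffer. */
	printf("new check, needs mapping: %d\n",
	       !is_buffer_dma_capable(mask, addr, size));
	return 0;
}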