Merge commit 'v2.6.27-rc6' into x86/iommu
author    Ingo Molnar <mingo@elte.hu>    Wed, 10 Sep 2008 09:32:52 +0000 (11:32 +0200)
committer Ingo Molnar <mingo@elte.hu>    Wed, 10 Sep 2008 09:32:52 +0000 (11:32 +0200)
14 files changed:
arch/ia64/include/asm/dma-mapping.h
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/k8.c
arch/x86/kernel/pci-calgary_64.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/pci-nommu.c
drivers/pci/intel-iommu.c
include/asm-x86/dma-mapping.h
include/asm-x86/iommu.h
include/linux/pci_ids.h
kernel/dma-coherent.c
lib/swiotlb.c

diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 9f0df9bd46b7f1a772f5a121782a73b70b753af4..06ff1ba21465a38dff682a5b19d0a6a05eab938a 100644
@@ -8,7 +8,9 @@
 #include <asm/machvec.h>
 #include <linux/scatterlist.h>
 
-#define dma_alloc_coherent     platform_dma_alloc_coherent
+#define dma_alloc_coherent(dev, size, handle, gfp)     \
+       platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+
 /* coherent mem. is cheap */
 static inline void *
 dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
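
Note: ia64 overloads ZONE_DMA to cover roughly the low 4 GB, so OR-ing in
GFP_DMA here pins coherent allocations below the 32-bit boundary. A driver
call such as (illustrative fragment, not part of the patch):

        void *buf = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);

now expands to:

        void *buf = platform_dma_alloc_coherent(dev, 4096, &handle,
                                                GFP_KERNEL | GFP_DMA);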
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 69b4d060b21c4c32e8e40b45c9dc2171c1b56505..01c68c38840d8b8997e5d801e508dd20185605f0 100644
@@ -1038,8 +1038,6 @@ out:
 
 /*
  * The exported free_coherent function for dma_ops.
- * FIXME: fix the generic x86 DMA layer so that it actually calls that
- *        function.
  */
 static void free_coherent(struct device *dev, size_t size,
                          void *virt_addr, dma_addr_t dma_addr)
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 4353cf5e6fac8b4d329e18def887dadd3f55bbb8..24bb5faf5efaefedcb578d4f4f27415a015833c7 100644
@@ -95,6 +95,20 @@ static void __init nvidia_bugs(int num, int slot, int func)
 
 }
 
+#ifdef CONFIG_DMAR
+static void __init intel_g33_dmar(int num, int slot, int func)
+{
+       struct acpi_table_header *dmar_tbl;
+       acpi_status status;
+
+       status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
+       if (ACPI_SUCCESS(status)) {
+               printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n");
+               dmar_disabled = 1;
+       }
+}
+#endif
+
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
 #define QFLAG_DONE             (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -114,6 +128,10 @@ static struct chipset early_qrk[] __initdata = {
          PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs },
        { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
          PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config },
+#ifdef CONFIG_DMAR
+       { PCI_VENDOR_ID_INTEL, 0x29c0,
+         PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar },
+#endif
        {}
 };
 
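
Note: device 0x29c0 is the Intel G31/G33 host bridge. Some BIOSes on these
boards advertise an ACPI DMAR table although the chipset has no working
VT-d unit, so the quirk flips dmar_disabled (made non-static later in this
commit) before intel-iommu parses the table. A rough sketch of how an
early_qrk entry fires at boot -- names and control flow are illustrative,
not the actual early-quirks.c walker:

        struct chipset *q;

        for (q = early_qrk; q->f; q++) {
                if ((q->vendor == PCI_ANY_ID || q->vendor == vendor) &&
                    (q->device == PCI_ANY_ID || q->device == device) &&
                    !(q->flags & QFLAG_APPLIED)) {
                        q->f(num, slot, func);
                        if (q->flags & QFLAG_APPLY_ONCE)
                                q->flags |= QFLAG_APPLIED;
                }
        }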
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index 7377ccb213350242ad7e7777ad266541b05a3e8b..304d8bad6559f94f0e278e80496ca841741574b4 100644
@@ -16,8 +16,9 @@ EXPORT_SYMBOL(num_k8_northbridges);
 static u32 *flush_words;
 
 struct pci_device_id k8_nb_ids[] = {
-       { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
-       { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
        {}
 };
 EXPORT_SYMBOL(k8_nb_ids);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index dcdac6c826e98047138f1e915e2d6b6ffbaf5ddd..8415d92853c4a474ba486912fd40808b6ecebcf4 100644
@@ -510,8 +510,22 @@ error:
        return ret;
 }
 
+static void calgary_free_coherent(struct device *dev, size_t size,
+                                 void *vaddr, dma_addr_t dma_handle)
+{
+       unsigned int npages;
+       struct iommu_table *tbl = find_iommu_table(dev);
+
+       size = PAGE_ALIGN(size);
+       npages = size >> PAGE_SHIFT;
+
+       iommu_free(tbl, dma_handle, npages);
+       free_pages((unsigned long)vaddr, get_order(size));
+}
+
 static struct dma_mapping_ops calgary_dma_ops = {
        .alloc_coherent = calgary_alloc_coherent,
+       .free_coherent = calgary_free_coherent,
        .map_single = calgary_map_single,
        .unmap_single = calgary_unmap_single,
        .map_sg = calgary_map_sg,
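
Note: the new free path mirrors the rounding done in calgary_alloc_coherent.
Assuming 4 KiB pages, freeing a 6000-byte buffer works out as (worked
example, not patch code):

        size   = PAGE_ALIGN(6000);      /* -> 8192 */
        npages = size >> PAGE_SHIFT;    /* -> 2 IOMMU pages for iommu_free() */
        /* get_order(8192) == 1, so free_pages() releases 2^1 = 2 pages */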
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 87d4d6964ec2b9ecb5d83ad01c081589218dc303..0a1408abcc6244c832eb4801175dcec135cface4 100644
@@ -41,11 +41,12 @@ EXPORT_SYMBOL(bad_dma_address);
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to older i386. */
-struct device fallback_dev = {
+struct device x86_dma_fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
-       .dma_mask = &fallback_dev.coherent_dma_mask,
+       .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
 };
+EXPORT_SYMBOL(x86_dma_fallback_dev);
 
 int dma_set_mask(struct device *dev, u64 mask)
 {
@@ -241,147 +242,6 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-/* Allocate DMA memory on node near device */
-static noinline struct page *
-dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
-{
-       int node;
-
-       node = dev_to_node(dev);
-
-       return alloc_pages_node(node, gfp, order);
-}
-
-/*
- * Allocate memory for a coherent mapping.
- */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  gfp_t gfp)
-{
-       struct dma_mapping_ops *ops = get_dma_ops(dev);
-       void *memory = NULL;
-       struct page *page;
-       unsigned long dma_mask = 0;
-       dma_addr_t bus;
-       int noretry = 0;
-
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-
-       if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
-               return memory;
-
-       if (!dev) {
-               dev = &fallback_dev;
-               gfp |= GFP_DMA;
-       }
-       dma_mask = dev->coherent_dma_mask;
-       if (dma_mask == 0)
-               dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
-
-       /* Device not DMA able */
-       if (dev->dma_mask == NULL)
-               return NULL;
-
-       /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
-       if (gfp & __GFP_DMA)
-               noretry = 1;
-
-#ifdef CONFIG_X86_64
-       /* Why <=? Even when the mask is smaller than 4GB it is often
-          larger than 16MB and in this case we have a chance of
-          finding fitting memory in the next higher zone first. If
-          not retry with true GFP_DMA. -AK */
-       if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-               gfp |= GFP_DMA32;
-               if (dma_mask < DMA_32BIT_MASK)
-                       noretry = 1;
-       }
-#endif
-
- again:
-       page = dma_alloc_pages(dev,
-               noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
-       if (page == NULL)
-               return NULL;
-
-       {
-               int high, mmu;
-               bus = page_to_phys(page);
-               memory = page_address(page);
-               high = (bus + size) >= dma_mask;
-               mmu = high;
-               if (force_iommu && !(gfp & GFP_DMA))
-                       mmu = 1;
-               else if (high) {
-                       free_pages((unsigned long)memory,
-                                  get_order(size));
-
-                       /* Don't use the 16MB ZONE_DMA unless absolutely
-                          needed. It's better to use remapping first. */
-                       if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-                               gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-                               goto again;
-                       }
-
-                       /* Let low level make its own zone decisions */
-                       gfp &= ~(GFP_DMA32|GFP_DMA);
-
-                       if (ops->alloc_coherent)
-                               return ops->alloc_coherent(dev, size,
-                                                          dma_handle, gfp);
-                       return NULL;
-               }
-
-               memset(memory, 0, size);
-               if (!mmu) {
-                       *dma_handle = bus;
-                       return memory;
-               }
-       }
-
-       if (ops->alloc_coherent) {
-               free_pages((unsigned long)memory, get_order(size));
-               gfp &= ~(GFP_DMA|GFP_DMA32);
-               return ops->alloc_coherent(dev, size, dma_handle, gfp);
-       }
-
-       if (ops->map_simple) {
-               *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
-                                             size,
-                                             PCI_DMA_BIDIRECTIONAL);
-               if (*dma_handle != bad_dma_address)
-                       return memory;
-       }
-
-       if (panic_on_overflow)
-               panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
-                     (unsigned long)size);
-       free_pages((unsigned long)memory, get_order(size));
-       return NULL;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-/*
- * Unmap coherent memory.
- * The caller must ensure that the device has finished accessing the mapping.
- */
-void dma_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t bus)
-{
-       struct dma_mapping_ops *ops = get_dma_ops(dev);
-
-       int order = get_order(size);
-       WARN_ON(irqs_disabled());       /* for portability */
-       if (dma_release_from_coherent(dev, order, vaddr))
-               return;
-       if (ops->unmap_single)
-               ops->unmap_single(dev, bus, size, 0);
-       free_pages((unsigned long)vaddr, order);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
 static int __init pci_iommu_init(void)
 {
        calgary_iommu_init();
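
Note: the ~140 lines removed above are not lost; the policy moves into the
new inline dma_alloc_coherent()/dma_free_coherent() in
include/asm-x86/dma-mapping.h (later in this commit) plus the per-IOMMU
alloc_coherent/free_coherent implementations. The resulting call flow, in
outline:

        /*
         * dma_alloc_coherent()              inline, asm-x86/dma-mapping.h
         *   -> dma_alloc_from_coherent()    per-device pool, if present
         *   -> ops->alloc_coherent()        nommu / GART / Calgary / ...
         *
         * The GFP_DMA32 vs. GFP_DMA zone choice is made once in
         * dma_alloc_coherent_gfp_flags() instead of the old retry loop.
         */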
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 49285f8fd4d54005d5efb9a9fad6bc498420e6d6..0b99d4a06f74a8599248ab0768ee63a1e5b33356 100644
@@ -82,7 +82,8 @@ AGPEXTERN __u32 *agp_gatt_table;
 static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
 static int need_flush;         /* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(struct device *dev, int size)
+static unsigned long alloc_iommu(struct device *dev, int size,
+                                unsigned long align_mask)
 {
        unsigned long offset, flags;
        unsigned long boundary_size;
@@ -90,16 +91,17 @@ static unsigned long alloc_iommu(struct device *dev, int size)
 
        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
-       boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+       boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;
 
        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-                                 size, base_index, boundary_size, 0);
+                                 size, base_index, boundary_size, align_mask);
        if (offset == -1) {
                need_flush = 1;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-                                         size, base_index, boundary_size, 0);
+                                         size, base_index, boundary_size,
+                                         align_mask);
        }
        if (offset != -1) {
                next_bit = offset+size;
@@ -236,10 +238,10 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-                               size_t size, int dir)
+                               size_t size, int dir, unsigned long align_mask)
 {
        unsigned long npages = iommu_num_pages(phys_mem, size);
-       unsigned long iommu_page = alloc_iommu(dev, npages);
+       unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
        int i;
 
        if (iommu_page == -1) {
@@ -259,16 +261,6 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
 }
 
-static dma_addr_t
-gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
-{
-       dma_addr_t map = dma_map_area(dev, paddr, size, dir);
-
-       flush_gart();
-
-       return map;
-}
-
 /* Map a single area into the IOMMU */
 static dma_addr_t
 gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
@@ -276,12 +268,13 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
        unsigned long bus;
 
        if (!dev)
-               dev = &fallback_dev;
+               dev = &x86_dma_fallback_dev;
 
        if (!need_iommu(dev, paddr, size))
                return paddr;
 
-       bus = gart_map_simple(dev, paddr, size, dir);
+       bus = dma_map_area(dev, paddr, size, dir, 0);
+       flush_gart();
 
        return bus;
 }
@@ -340,7 +333,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                unsigned long addr = sg_phys(s);
 
                if (nonforced_iommu(dev, addr, s->length)) {
-                       addr = dma_map_area(dev, addr, s->length, dir);
+                       addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
@@ -362,7 +355,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
 {
-       unsigned long iommu_start = alloc_iommu(dev, pages);
+       unsigned long iommu_start = alloc_iommu(dev, pages, 0);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;
@@ -427,7 +420,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
                return 0;
 
        if (!dev)
-               dev = &fallback_dev;
+               dev = &x86_dma_fallback_dev;
 
        out = 0;
        start = 0;
@@ -499,6 +492,41 @@ error:
        return 0;
 }
 
+/* allocate and map a coherent mapping */
+static void *
+gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
+                   gfp_t flag)
+{
+       void *vaddr;
+       unsigned long align_mask;
+
+       vaddr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
+       if (!vaddr)
+               return NULL;
+
+       align_mask = (1UL << get_order(size)) - 1;
+
+       *dma_addr = dma_map_area(dev, __pa(vaddr), size, DMA_BIDIRECTIONAL,
+                                align_mask);
+       flush_gart();
+
+       if (*dma_addr != bad_dma_address)
+               return vaddr;
+
+       free_pages((unsigned long)vaddr, get_order(size));
+
+       return NULL;
+}
+
+/* free a coherent mapping */
+static void
+gart_free_coherent(struct device *dev, size_t size, void *vaddr,
+                  dma_addr_t dma_addr)
+{
+       gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+       free_pages((unsigned long)vaddr, get_order(size));
+}
+
 static int no_agp;
 
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -691,7 +719,6 @@ extern int agp_amd64_init(void);
 
 static struct dma_mapping_ops gart_dma_ops = {
        .map_single                     = gart_map_single,
-       .map_simple                     = gart_map_simple,
        .unmap_single                   = gart_unmap_single,
        .sync_single_for_cpu            = NULL,
        .sync_single_for_device         = NULL,
@@ -701,6 +728,8 @@ static struct dma_mapping_ops gart_dma_ops = {
        .sync_sg_for_device             = NULL,
        .map_sg                         = gart_map_sg,
        .unmap_sg                       = gart_unmap_sg,
+       .alloc_coherent                 = gart_alloc_coherent,
+       .free_coherent                  = gart_free_coherent,
 };
 
 void gart_iommu_shutdown(void)
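
Note: gart_alloc_coherent passes align_mask so that alloc_iommu() returns a
naturally aligned range, matching the DMA API's guarantee that coherent
buffers are aligned to the smallest page order covering the size. For a
16 KiB request with 4 KiB pages (worked example):

        order      = get_order(16384);      /* -> 2 */
        align_mask = (1UL << order) - 1;    /* -> 0x3: start on a 4-page
                                               boundary, so the bus address
                                               is 16 KiB aligned */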
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 3f91f71cdc3eac766be0891511afd738001c6918..8e398b56f50baa953cd9475b135cea5a2fb9b687 100644
@@ -72,7 +72,50 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
        return nents;
 }
 
+static void *
+nommu_alloc_coherent(struct device *hwdev, size_t size,
+                    dma_addr_t *dma_addr, gfp_t gfp)
+{
+       unsigned long dma_mask;
+       int node;
+       struct page *page;
+
+       dma_mask = dma_alloc_coherent_mask(hwdev, gfp);
+
+       gfp |= __GFP_ZERO;
+
+       node = dev_to_node(hwdev);
+again:
+       page = alloc_pages_node(node, gfp, get_order(size));
+       if (!page)
+               return NULL;
+
+       if ((page_to_phys(page) + size > dma_mask) && !(gfp & GFP_DMA)) {
+               free_pages((unsigned long)page_address(page), get_order(size));
+               gfp |= GFP_DMA;
+               goto again;
+       }
+
+       *dma_addr = page_to_phys(page);
+       if (check_addr("alloc_coherent", hwdev, *dma_addr, size)) {
+               flush_write_buffers();
+               return page_address(page);
+       }
+
+       free_pages((unsigned long)page_address(page), get_order(size));
+
+       return NULL;
+}
+
+static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
+                               dma_addr_t dma_addr)
+{
+       free_pages((unsigned long)vaddr, get_order(size));
+}
+
 struct dma_mapping_ops nommu_dma_ops = {
+       .alloc_coherent = nommu_alloc_coherent,
+       .free_coherent = nommu_free_coherent,
        .map_single = nommu_map_single,
        .map_sg = nommu_map_sg,
        .is_phys = 1,
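
Note: with no IOMMU the bus address is simply the physical address, so a
small coherent_dma_mask can only be met by retrying in a lower zone.
Illustrative values:

        /* device limited to 24-bit DMA: coherent_dma_mask = DMA_24BIT_MASK.
         * If alloc_pages_node() hands back a page above 16 MiB, the
         * (page_to_phys(page) + size > dma_mask) test frees it and retries
         * with GFP_DMA, i.e. from ZONE_DMA (below 16 MiB on x86). */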
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8d0e60ac849cb5f34e609fdaf9215688091728a7..eaba6ecc2adb75ca5c8f3de2c727cb6bd430c114 100644
@@ -80,7 +80,7 @@ static long list_size;
 
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
-static int dmar_disabled;
+int dmar_disabled;
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index ad9cd6d49bfc9bc624747709da188301b35f5b55..56075320b81383db36b063955e7e28d597d2ab67 100644
@@ -9,10 +9,11 @@
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
+#include <asm-generic/dma-coherent.h>
 
 extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
-extern struct device fallback_dev;
+extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
 extern int force_iommu;
 
@@ -25,9 +26,6 @@ struct dma_mapping_ops {
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                size_t size, int direction);
-       /* like map_single, but doesn't check the device mask */
-       dma_addr_t      (*map_simple)(struct device *hwdev, phys_addr_t ptr,
-                               size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
@@ -87,13 +85,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle);
-
+#define dma_is_consistent(d, h)        (1)
 
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
@@ -247,7 +239,68 @@ static inline int dma_get_cache_alignment(void)
        return boot_cpu_data.x86_clflush_size;
 }
 
-#define dma_is_consistent(d, h)        (1)
+static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+                                                   gfp_t gfp)
+{
+       unsigned long dma_mask = 0;
+
+       dma_mask = dev->coherent_dma_mask;
+       if (!dma_mask)
+               dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+
+       return dma_mask;
+}
+
+static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+{
+#ifdef CONFIG_X86_64
+       unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+
+       if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+               gfp |= GFP_DMA32;
+#endif
+       return gfp;
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp)
+{
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+       void *memory;
+
+       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+       if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+               return memory;
+
+       if (!dev) {
+               dev = &x86_dma_fallback_dev;
+               gfp |= GFP_DMA;
+       }
+
+       if (!dev->dma_mask)
+               return NULL;
+
+       if (!ops->alloc_coherent)
+               return NULL;
+
+       return ops->alloc_coherent(dev, size, dma_handle,
+                                  dma_alloc_coherent_gfp_flags(dev, gfp));
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                                    void *vaddr, dma_addr_t bus)
+{
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+       WARN_ON(irqs_disabled());       /* for portability */
+
+       if (dma_release_from_coherent(dev, get_order(size), vaddr))
+               return;
+
+       if (ops->free_coherent)
+               ops->free_coherent(dev, size, vaddr, bus);
+}
 
-#include <asm-generic/dma-coherent.h>
 #endif
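
Note: driver-facing usage is unchanged by the move to inline dispatch. A
typical caller looks like this ('mydev' and the buffer size are
placeholders):

        dma_addr_t handle;
        void *buf;

        buf = dma_alloc_coherent(&mydev->dev, 8192, &handle, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        /* device DMAs via 'handle'; the CPU accesses 'buf' */
        dma_free_coherent(&mydev->dev, 8192, buf, handle);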
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 5f888cc5be49a0eb87fab02d32ec050a686d0387..621a1af94c4c1bc0fc1840b1e4ac95c95364e7f6 100644
@@ -6,6 +6,7 @@ extern void no_iommu_init(void);
 extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int dmar_disabled;
 
 extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
 
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f1624b3967548fc116211aee27114e65e91f5d36..2886b0eb53ec5d9efbf09512608de851bd29d5a1 100644
 #define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP        0x1101
 #define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102
 #define PCI_DEVICE_ID_AMD_K8_NB_MISC   0x1103
+#define PCI_DEVICE_ID_AMD_11H_NB_HT    0x1300
+#define PCI_DEVICE_ID_AMD_11H_NB_MAP   0x1301
+#define PCI_DEVICE_ID_AMD_11H_NB_DRAM  0x1302
+#define PCI_DEVICE_ID_AMD_11H_NB_MISC  0x1303
+#define PCI_DEVICE_ID_AMD_11H_NB_LINK  0x1304
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
 #define PCI_DEVICE_ID_AMD_SCSI         0x2020
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index c1d4d5b4c61ccb6fb95262fc34c2166158e00cd3..f013a0c2e111b09b2045d600d3d3c9944b3ffe9a 100644
@@ -124,6 +124,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
        }
        return (mem != NULL);
 }
+EXPORT_SYMBOL(dma_alloc_from_coherent);
 
 /**
  * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
@@ -151,3 +152,4 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
        }
        return 0;
 }
+EXPORT_SYMBOL(dma_release_from_coherent);
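
Note: these exports become necessary because dma_alloc_coherent() and
dma_free_coherent() are now inline, so the calls to
dma_alloc_from_coherent()/dma_release_from_coherent() are compiled into
every user -- including modules.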
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 977edbdbc1debada5937958f5835b1dc88c9beba..b5f5d11330420fb502ca13c2acba6848f1ca17a7 100644
@@ -283,6 +283,11 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
        return (addr & ~mask) != 0;
 }
 
+static int is_swiotlb_buffer(char *addr)
+{
+       return addr >= io_tlb_start && addr < io_tlb_end;
+}
+
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
@@ -467,13 +472,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        void *ret;
        int order = get_order(size);
 
-       /*
-        * XXX fix me: the DMA API should pass us an explicit DMA mask
-        * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
-        * bit range instead of a 16MB one).
-        */
-       flags |= GFP_DMA;
-
        ret = (void *)__get_free_pages(flags, order);
        if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
                /*
@@ -490,12 +488,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                 * swiotlb_map_single(), which will grab memory from
                 * the lowest available address range.
                 */
-               dma_addr_t handle;
-               handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-               if (swiotlb_dma_mapping_error(hwdev, handle))
+               ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+               if (!ret)
                        return NULL;
-
-               ret = bus_to_virt(handle);
        }
 
        memset(ret, 0, size);
@@ -518,12 +513,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                      dma_addr_t dma_handle)
 {
        WARN_ON(irqs_disabled());
-       if (!(vaddr >= (void *)io_tlb_start
-                    && vaddr < (void *)io_tlb_end))
+       if (!is_swiotlb_buffer(vaddr))
                free_pages((unsigned long) vaddr, get_order(size));
        else
                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-               swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+               unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 
 static void
@@ -612,7 +606,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
        char *dma_addr = bus_to_virt(dev_addr);
 
        BUG_ON(dir == DMA_NONE);
-       if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+       if (is_swiotlb_buffer(dma_addr))
                unmap_single(hwdev, dma_addr, size, dir);
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
@@ -642,7 +636,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
        char *dma_addr = bus_to_virt(dev_addr);
 
        BUG_ON(dir == DMA_NONE);
-       if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+       if (is_swiotlb_buffer(dma_addr))
                sync_single(hwdev, dma_addr, size, dir, target);
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
@@ -673,7 +667,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
        char *dma_addr = bus_to_virt(dev_addr) + offset;
 
        BUG_ON(dir == DMA_NONE);
-       if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+       if (is_swiotlb_buffer(dma_addr))
                sync_single(hwdev, dma_addr, size, dir, target);
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
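
Note: the swiotlb changes drop the unconditional GFP_DMA and centralize the
bounce-pool range test in is_swiotlb_buffer(). The resulting allocation
strategy, in outline:

        /*
         * swiotlb_alloc_coherent():
         *  1. try __get_free_pages() with the caller's zone flags;
         *  2. if address_needs_mapping() rejects the page for this device,
         *     free it and fall back to map_single(hwdev, NULL, size,
         *     DMA_FROM_DEVICE), carving the buffer out of the low bounce
         *     pool;
         *  3. swiotlb_free_coherent() then uses is_swiotlb_buffer() to pick
         *     free_pages() vs. unmap_single().
         */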