diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 25fc1df486bbad54799c67538fd0521470df9346..9dbd5066acafee0594b7d20938822d6f7a3f3dbd 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1782,7 +1782,7 @@ static inline void iommu_prepare_isa(void)
        ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
 
        if (ret)
-               printk("IOMMU: Failed to create 0-64M identity map, "
+               printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map, "
                        "floppy might not work\n");
 
 }
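
A printk() without an explicit level goes out at the default message
loglevel and can be filtered away from the console, so a real error like
this should carry KERN_ERR, as the fix above does. (The message is also
corrected here to say 0-16MiB, since the call maps 16*1024*1024 bytes.)
On kernels that provide it, the shorter pr_err() helper spells the same
thing (a sketch; pr_err() expands to printk(KERN_ERR ...)):

    pr_err("IOMMU: Failed to create 0-16MiB identity map, "
           "floppy might not work\n");
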
@@ -1855,11 +1855,40 @@ static int __init init_dmars(void)
                }
        }
 
+       /*
+        * Start from a sane IOMMU hardware state.
+        */
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
 
                iommu = drhd->iommu;
+
+               /*
+                * If queued invalidation was already initialized by us (for
+                * example, while enabling interrupt remapping), then things
+                * are already rolling from a sane state.
+                */
+               if (iommu->qi)
+                       continue;
+
+               /*
+                * Clear any previous faults.
+                */
+               dmar_fault(-1, iommu);
+               /*
+                * Disable queued invalidation if supported and already enabled
+                * before OS handover.
+                */
+               dmar_disable_qi(iommu);
+       }
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+
+               iommu = drhd->iommu;
+
                if (dmar_enable_qi(iommu)) {
                        /*
                         * Queued Invalidate not enabled, use Register Based
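
The new code runs two passes on purpose: pass one brings every unit back
to a known-clean state (pending faults acknowledged via dmar_fault(-1, ...),
any queued invalidation left enabled by firmware or a previous kernel shut
off via dmar_disable_qi()), and only then does pass two enable queued
invalidation anywhere. Abstracted, with hypothetical helper names:

    /* Pass 1: undo whatever state was handed over to the OS. */
    for_each_unit(unit)
            reset_to_known_state(unit);

    /* Pass 2: only now bring the feature up everywhere. */
    for_each_unit(unit)
            enable_feature(unit);

Splitting the passes means no unit is switched on while a neighbouring
unit is still in an unknown state.
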
@@ -2095,11 +2124,13 @@ error:
        return 0;
 }
 
-dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
-                           size_t size, int dir)
+static dma_addr_t intel_map_page(struct device *dev, struct page *page,
+                                unsigned long offset, size_t size,
+                                enum dma_data_direction dir,
+                                struct dma_attrs *attrs)
 {
-       return __intel_map_single(hwdev, paddr, size, dir,
-                                 to_pci_dev(hwdev)->dma_mask);
+       return __intel_map_single(dev, page_to_phys(page) + offset, size,
+                                 dir, to_pci_dev(dev)->dma_mask);
 }
 
 static void flush_unmaps(void)
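
Note there is no .map_single hook in struct dma_map_ops: the generic DMA
layer describes a kernel-virtual buffer as a page plus an offset and
funnels both dma_map_single() and dma_map_page() through .map_page.
Roughly how the generic wrapper of this era drives the hook (a sketch,
not the exact header code):

    #include <linux/dma-mapping.h>

    static inline dma_addr_t dma_map_single_sketch(struct device *dev,
                                                   void *ptr, size_t size,
                                                   enum dma_data_direction dir)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            /* Any lowmem virtual address is a page plus an offset,
             * so one hook serves both entry points. */
            return ops->map_page(dev, virt_to_page(ptr),
                                 offset_in_page(ptr), size, dir, NULL);
    }
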
@@ -2163,8 +2194,9 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-                       int dir)
+static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
+                            size_t size, enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
@@ -2208,8 +2240,14 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
        }
 }
 
-void *intel_alloc_coherent(struct device *hwdev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flags)
+static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+                              int dir)
+{
+       intel_unmap_page(dev, dev_addr, size, dir, NULL);
+}
+
+static void *intel_alloc_coherent(struct device *hwdev, size_t size,
+                                 dma_addr_t *dma_handle, gfp_t flags)
 {
        void *vaddr;
        int order;
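
intel_unmap_single() is kept only as a file-local wrapper so that callers
inside this driver which still pass a plain int direction keep compiling;
the int converts implicitly to enum dma_data_direction. For example, the
existing call in intel_free_coherent() (shown here as context, not part
of this hunk) stays as:

    intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
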
@@ -2232,8 +2270,8 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size,
        return NULL;
 }
 
-void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-                        dma_addr_t dma_handle)
+static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+                               dma_addr_t dma_handle)
 {
        int order;
 
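
The coherent pair is now reached through the ops table as
dma_alloc_coherent()/dma_free_coherent() rather than called directly.
Typical driver-side use of the pair (a sketch; the device pointer and
size are made up):

    dma_addr_t bus;
    void *cpu = dma_alloc_coherent(&pdev->dev, 4096, &bus, GFP_KERNEL);

    if (!cpu)
            return -ENOMEM;
    /* ... CPU accesses go through "cpu", device DMA through "bus" ... */
    dma_free_coherent(&pdev->dev, 4096, cpu, bus);
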
@@ -2246,8 +2284,9 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 
 #define SG_ENT_VIRT_ADDRESS(sg)        (sg_virt((sg)))
 
-void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-                   int nelems, int dir)
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+                          int nelems, enum dma_data_direction dir,
+                          struct dma_attrs *attrs)
 {
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2304,8 +2343,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
        return nelems;
 }
 
-int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
-                int dir)
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+                       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        void *addr;
        int i;
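
The scatterlist pair picks up the same enum dma_data_direction and
struct dma_attrs signature as the page hooks. A canonical consumer loop
for dma_map_sg()/dma_unmap_sg() looks like this (a sketch;
program_descriptor() is a hypothetical device-specific helper):

    struct scatterlist *sg;
    int i, n;

    n = dma_map_sg(&pdev->dev, sglist, nelems, DMA_TO_DEVICE);
    if (!n)
            return -EIO;

    /* dma_map_sg() may coalesce entries, so iterate over n, not nelems. */
    for_each_sg(sglist, sg, n, i)
            program_descriptor(sg_dma_address(sg), sg_dma_len(sg));

    /* ... after the transfer completes, unmap with the original count. */
    dma_unmap_sg(&pdev->dev, sglist, nelems, DMA_TO_DEVICE);
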
@@ -2385,13 +2424,19 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
        return nelems;
 }
 
-static struct dma_mapping_ops intel_dma_ops = {
+static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return !dma_addr;
+}
+
+struct dma_map_ops intel_dma_ops = {
        .alloc_coherent = intel_alloc_coherent,
        .free_coherent = intel_free_coherent,
-       .map_single = intel_map_single,
-       .unmap_single = intel_unmap_single,
        .map_sg = intel_map_sg,
        .unmap_sg = intel_unmap_sg,
+       .map_page = intel_map_page,
+       .unmap_page = intel_unmap_page,
+       .mapping_error = intel_mapping_error,
 };
 
 static inline int iommu_domain_cache_init(void)
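
intel_mapping_error() reports failure for a zero bus address, matching
__intel_map_single() returning 0 from its error path (see the "error:"
hunk above). Drivers observe that through dma_mapping_error(); the
.map_single/.unmap_single entries are gone from the table because the
generic layer now builds those calls on top of .map_page/.unmap_page.
The driver-visible check (a sketch):

    dma_addr_t addr = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
                                   DMA_TO_DEVICE);

    if (dma_mapping_error(&pdev->dev, addr))
            return -ENOMEM;
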