drm: fix for non-coherent DMA PowerPC
author     Benjamin Herrenschmidt <benh@kernel.crashing.org>
           Fri, 28 Mar 2008 21:23:07 +0000 (14:23 -0700)
committer  Dave Airlie <airlied@linux.ie>
           Sat, 29 Mar 2008 21:57:57 +0000 (07:57 +1000)
This patch fixes bits of the DRM to make the radeon DRI work on
non-cache-coherent PCI DMA variants of the PowerPC processors.

It moves the few places that need changes into wrappers, so that
other architectures with similar issues can easily add their own
changes to those wrappers, at least until we have a more useful
generic kernel API.
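
As an illustration of the wrapper approach, a further non-coherent architecture
could plug in by adding one more preprocessor branch instead of touching every
caller. A minimal sketch, assuming a hypothetical CONFIG_FOO_NONCOHERENT option
and foo_noncached() helper (only the PowerPC branch exists in this patch):

/*
 * Sketch only: drm_dma_prot() as introduced below, with a hypothetical
 * extension point for another non-coherent architecture.
 */
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;		/* what this patch adds for PowerPC */
#elif defined(CONFIG_FOO_NONCOHERENT)
	tmp = foo_noncached(tmp);	/* where another port would hook in */
#endif
	return tmp;
}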

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>
drivers/char/drm/ati_pcigart.c
drivers/char/drm/drm_scatter.c
drivers/char/drm/drm_vm.c

diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index 35d25d821c38715c8f0dc659ad8c592b84f0a6d5..141f4dfa0a117cdd6639abab27424bc529fae053 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -168,6 +168,12 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
                }
        }
 
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+               dma_sync_single_for_device(&dev->pdev->dev,
+                                          bus_address,
+                                          max_pages * sizeof(u32),
+                                          PCI_DMA_TODEVICE);
+
        ret = 1;
 
 #if defined(__i386__) || defined(__x86_64__)
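
On a non-coherent platform the CPU fills the GART table through a cacheable
mapping, so the new dma_sync_single_for_device() call above pushes those writes
out to memory before the GPU walks the table. A minimal sketch of that
streaming-DMA pattern, with hypothetical names (the patch itself passes
&dev->pdev->dev and the equivalent PCI_DMA_TODEVICE constant):

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Sketch: hand a CPU-built table to the device on a non-coherent platform. */
static void publish_table(struct device *dev, u32 *table,
			  dma_addr_t table_handle, size_t entries)
{
	size_t i;

	for (i = 0; i < entries; i++)
		table[i] = 0;			/* CPU fills the table ... */

	/* ... then flushes its cached writes so the device reads valid data */
	dma_sync_single_for_device(dev, table_handle,
				   entries * sizeof(u32), DMA_TO_DEVICE);
}
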
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 26d8f675ed5df8e785900b7fb9e8f09c0e7aff17..b2b0f3d4171492de22ebb6949b9bb0ce5f9d65c6 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
 
 #define DEBUG_SCATTER 0
 
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+       return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+       return vmalloc_32(size);
+#endif
+}
+
 void drm_sg_cleanup(struct drm_sg_mem * entry)
 {
        struct page *page;
@@ -104,7 +113,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
        }
        memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
 
-       entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
+       entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
        if (!entry->virtual) {
                drm_free(entry->busaddr,
                         entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
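
drm_vmalloc_dma() keeps the existing vmalloc_32() behaviour everywhere except
non-coherent PowerPC, where the scatter/gather backing store is mapped
non-cacheable so CPU and device agree on its contents. A usage sketch mirroring
what drm_sg_alloc() does with the helper (alloc_sg_backing() is a hypothetical
name):

#include <linux/string.h>
#include <linux/vmalloc.h>

/* Sketch: allocate and zero the scatter/gather backing store via the wrapper. */
static void *alloc_sg_backing(unsigned long pages)
{
	void *virt = drm_vmalloc_dma(pages << PAGE_SHIFT);

	if (!virt)
		return NULL;
	memset(virt, 0, pages << PAGE_SHIFT);	/* zeroed as drm_sg_alloc() does */
	return virt;				/* released later with vfree() */
}
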
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 3d65c4dcd0c661d9aa21fb9effdc5149c8eac005..945df72a51a9f2df78aac8f9a1c128ac85016a0e 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -54,13 +54,24 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
-#endif
-#if defined(__ia64__)
+#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__)
+       tmp = pgprot_noncached(tmp);
+#endif
+       return tmp;
+}
+
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+       pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+       tmp |= _PAGE_NO_CACHE;
 #endif
        return tmp;
 }
@@ -603,9 +614,6 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                offset = dev->driver->get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
-#ifdef __sparc__
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
@@ -624,6 +632,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
+               vma->vm_page_prot = drm_dma_prot(map->type, vma);
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
@@ -631,6 +640,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
+               vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
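
Taken together, the drm_vm.c changes route each mapping type through one of the
two helpers: register and frame-buffer maps get their protection from
drm_io_prot() (which now also covers the old sparc special case), while
consistent and shared maps get it from the new drm_dma_prot(). A simplified
sketch of the resulting selection in drm_mmap_locked() (drm_pick_page_prot() is
a hypothetical name, not a function added by this patch):

/* Sketch: how the page protection is chosen per map type after this patch. */
static void drm_pick_page_prot(struct drm_map *map, struct vm_area_struct *vma)
{
	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		/* MMIO: uncached, and guarded for registers on PowerPC */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
		break;
	case _DRM_CONSISTENT:
	case _DRM_SHM:
		/* DMA-able kernel memory: uncached on non-coherent PowerPC */
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		break;
	}
}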