dump_cache("cache", cpu, CACHE_ISIZE(info));
                }
        }
+
+       if (arch_is_coherent())
+               printk("Cache coherency enabled\n");
 }
 
 int cpu_architecture(void)
 
                *IXP23XX_PCI_CPP_ADDR_BITS &= ~(1 << 1);
        } else {
                *IXP23XX_PCI_CPP_ADDR_BITS |= (1 << 1);
+
+               /*
+                * Enable coherency on A2 silicon.
+                */
+               if (arch_is_coherent())
+                       *IXP23XX_CPP2XSI_CURR_XFER_REG3 &= ~IXP23XX_CPP2XSI_COH_OFF;
        }
 }
 
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 
+#include <asm/memory.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
+       /*
+        * On a cache-coherent platform no special uncached mapping is
+        * required: an ordinary kmalloc() buffer is already coherent
+        * with DMA, so skip the uncached __dma_alloc() path entirely.
+        */
+       if (arch_is_coherent()) {
+               void *virt;
+
+               virt = kmalloc(size, gfp);
+               if (!virt)
+                       return NULL;
+               /* Report the bus address the device must use for this buffer. */
+               *handle =  virt_to_dma(dev, virt);
+
+               return virt;
+       }
+
         return __dma_alloc(dev, size, handle, gfp,
                            pgprot_noncached(pgprot_kernel));
 }
 
        WARN_ON(irqs_disabled());
 
+       if (arch_is_coherent()) {
+               kfree(cpu_addr);
+               return;
+       }
+
        size = PAGE_ALIGN(size);
 
        spin_lock_irqsave(&consistent_lock, flags);
 
        cp = &cache_policies[cachepolicy];
        kern_pgprot = user_pgprot = cp->pte;
 
+       /*
+        * Enable CPU-specific coherency if supported.
+        * (Only available on XSC3 at the moment.)
+        */
+       if (arch_is_coherent()) {
+               if (cpu_is_xsc3()) {
+                       mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
+               }
+       }
+
        /*
         * ARMv6 and above have extended page tables.
         */
 
 ENTRY(cpu_xsc3_set_pte)
        str     r1, [r0], #-2048                @ linux version
 
-       bic     r2, r1, #0xff0
+       bic     r2, r1, #0xdf0                  @ Keep C, B, coherency bits
        orr     r2, r2, #PTE_TYPE_EXT           @ extended page
 
        eor     r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
 
  *             to an address that the kernel can use.
  */
 #ifndef __ASSEMBLY__
+#include <asm/mach-types.h>
 
 #define __virt_to_bus(v)                                               \
        ({ unsigned int ret;                                            \
        data = *((volatile int *)IXP23XX_PCI_SDRAM_BAR);                \
         __phys_to_virt((((b - (data & 0xfffffff0)) + 0x00000000))); })
 
+/*
+ * Coherency support.  Only supported on A2 CPUs or on A1
+ * systems that have the cache coherency workaround.
+ */
+static inline int __ixp23xx_arch_is_coherent(void)
+{
+       extern unsigned int processor_id;
+
+       /*
+        * The low four bits of the CPU ID encode the stepping:
+        * a value >= 2 indicates A2 (or later) silicon, which is
+        * coherent.  Roadrunner boards are A1 parts fitted with the
+        * coherency workaround, so they count as coherent too.
+        * NOTE(review): field meaning inferred from the A2 comment
+        * above — confirm against the IXP23xx CPU ID layout.
+        */
+       if (((processor_id & 15) >= 2) || machine_is_roadrunner())
+               return 1;
+
+       return 0;
+}
+
+#define arch_is_coherent()     __ixp23xx_arch_is_coherent()
+
 #endif
 
 
 
 
 static inline int dma_is_consistent(dma_addr_t handle)
 {
-       return 0;
+       /*
+        * DMA is consistent exactly when the platform is cache-coherent;
+        * normalise arch_is_coherent() to 0/1 for callers.
+        */
+       return !!arch_is_coherent();
 }
 
 /*
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction dir)
 {
-       consistent_sync(cpu_addr, size, dir);
+       if (!arch_is_coherent())
+               consistent_sync(cpu_addr, size, dir);
+
        return virt_to_dma(dev, (unsigned long)cpu_addr);
 }
 #else
 
                sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
                virt = page_address(sg->page) + sg->offset;
-               consistent_sync(virt, sg->length, dir);
+
+               if (!arch_is_coherent())
+                       consistent_sync(virt, sg->length, dir);
        }
 
        return nents;
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
                        enum dma_data_direction dir)
 {
-       consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+       if (!arch_is_coherent())
+               consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
                            enum dma_data_direction dir)
 {
-       consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+       /*
+        * Coherent hardware keeps the CPU caches and memory in sync,
+        * so the explicit cache maintenance is only needed otherwise.
+        */
+       if (!arch_is_coherent())
+               consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
 
        for (i = 0; i < nents; i++, sg++) {
                char *virt = page_address(sg->page) + sg->offset;
-               consistent_sync(virt, sg->length, dir);
+               if (!arch_is_coherent())
+                       consistent_sync(virt, sg->length, dir);
        }
 }
 
 
        for (i = 0; i < nents; i++, sg++) {
                char *virt = page_address(sg->page) + sg->offset;
-               consistent_sync(virt, sg->length, dir);
+               if (!arch_is_coherent())
+                       consistent_sync(virt, sg->length, dir);
        }
 }
 #else
 
 #define virt_to_dma(dev, addr)         (__arch_virt_to_dma(dev, addr))
 #endif
 
+/*
+ * Optional coherency support.  Currently used only by selected
+ * Intel XSC3-based systems.
+ */
+#ifndef arch_is_coherent
+#define arch_is_coherent()             0
+#endif
+
 #endif
 
 #include <asm-generic/memory_model.h>
 
 #define PTE_EXT_AP_URW_SRW     (PTE_EXT_AP1|PTE_EXT_AP0)
 #define PTE_EXT_TEX(x)         ((x) << 6)      /* v5 */
 #define PTE_EXT_APX            (1 << 9)        /* v6 */
+#define PTE_EXT_COHERENT       (1 << 9)        /* XScale3 */
 #define PTE_EXT_SHARED         (1 << 10)       /* v6 */
 #define PTE_EXT_NG             (1 << 11)       /* v6 */
 
 
 #define L_PTE_WRITE            (1 << 5)
 #define L_PTE_EXEC             (1 << 6)
 #define L_PTE_DIRTY            (1 << 7)
+#define L_PTE_COHERENT         (1 << 9)        /* I/O coherent (xsc3) */
 #define L_PTE_SHARED           (1 << 10)       /* shared between CPUs (v6) */
 #define L_PTE_ASID             (1 << 11)       /* non-global (use ASID, v6) */