[ARM] Introduce new PTE memory type bits
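
This patch replaces the old single-bit L_PTE_CACHEABLE/L_PTE_BUFFERABLE PTE flags with an encoded, multi-bit L_PTE_MT_* memory-type field, so a mapping's type is tested and rewritten as a whole field rather than one bit at a time. As a minimal sketch of the idea only (bit positions and values below are assumptions for illustration, not copied from this tree's headers):

    /* Sketch only: shift, mask and values are assumed, not this tree's. */
    #define EX_PTE_MT_SHIFT         2
    #define EX_PTE_MT_MASK          (0x0f << EX_PTE_MT_SHIFT)
    #define EX_PTE_MT_UNCACHED      (0x00 << EX_PTE_MT_SHIFT)
    #define EX_PTE_MT_BUFFERABLE    (0x01 << EX_PTE_MT_SHIFT)
    #define EX_PTE_MT_WRITETHROUGH  (0x02 << EX_PTE_MT_SHIFT)
    #define EX_PTE_MT_WRITEBACK     (0x03 << EX_PTE_MT_SHIFT)

    /* With an encoded field, a single-bit AND can no longer identify a
     * memory type; mask out the whole field and compare it instead. */
    static inline int ex_pte_mt_is(unsigned long pteval, unsigned long type)
    {
            return (pteval & EX_PTE_MT_MASK) == type;
    }
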
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 44558d5f9313c0e22d476bc07cd995c466666e66..6f92904a81e9170e0e49678a7f2ac3150a37c88f 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -21,7 +21,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
  * We take the easy way out of this problem - we make the
@@ -37,7 +37,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte, entry;
-       int ret = 0;
+       int ret;
 
        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none(*pgd))
@@ -54,16 +54,21 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
        pte = pte_offset_map(pmd, address);
        entry = *pte;
 
+       /*
+        * If this page is present, it's actually being shared.
+        */
+       ret = pte_present(entry);
+
        /*
         * If this page isn't present, or is already setup to
         * fault (ie, is old), we can safely ignore any issues.
         */
-       if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
+       if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
                flush_cache_page(vma, address, pte_pfn(entry));
-               pte_val(entry) &= ~shared_pte_mask;
+               pte_val(entry) &= ~L_PTE_MT_MASK;
+               pte_val(entry) |= shared_pte_mask;
                set_pte_at(vma->vm_mm, address, pte, entry);
                flush_tlb_page(vma, address);
-               ret = 1;
        }
        pte_unmap(pte);
        return ret;
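
Two things change in adjust_pte() above. First, ret is now pte_present(entry), so the function reports "this mapping exists" rather than "this mapping was modified"; the caller (make_coherent()) therefore counts a mapping as an alias even when it is already uncached, which is exactly the case where the faulting mapping itself still needs adjusting. Second, the test and the rewrite operate on the whole memory-type field: any type other than shared_pte_mask is replaced outright, instead of merely clearing a cacheable bit. The rewrite step in isolation, as a hedged sketch reusing the assumed EX_PTE_MT_* constants above (plain unsigned long standing in for the kernel's pte_t):

    /* Replace the PTE's entire memory-type field with the shared type. */
    static unsigned long ex_remap_shared(unsigned long pteval,
                                         unsigned long shared_type)
    {
            pteval &= ~EX_PTE_MT_MASK;      /* drop the old memory type   */
            pteval |= shared_type;          /* install the shared variant */
            return pteval;
    }
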
@@ -144,13 +149,17 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
        page = pfn_to_page(pfn);
        mapping = page_mapping(page);
        if (mapping) {
+#ifndef CONFIG_SMP
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
 
                if (dirty)
                        __flush_dcache_page(mapping, page);
+#endif
 
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, pfn);
+               else if (vma->vm_flags & VM_EXEC)
+                       __flush_icache_all();
        }
 }
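
In update_mmu_cache(), the lazy D-cache flush keyed off PG_dcache_dirty is now compiled in only on UP builds, and caches that are not VIVT get an explicit I-cache flush for executable mappings, since make_coherent() only runs on VIVT. On ARM, invalidating the entire instruction cache is a single CP15 write (c7, c5, 0); a minimal sketch of what a helper such as __flush_icache_all() can expand to, noting that the exact definition in this tree may differ:

    /* Sketch: invalidate the whole I-cache via CP15 c7, c5, 0. */
    static inline void ex_flush_icache_all(void)
    {
            __asm__ volatile("mcr   p15, 0, %0, c7, c5, 0" : : "r" (0));
    }
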
 
@@ -189,7 +198,7 @@ void __init check_writebuffer_bugs(void)
                unsigned long *p1, *p2;
                pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
                                         L_PTE_DIRTY|L_PTE_WRITE|
-                                        L_PTE_BUFFERABLE);
+                                        L_PTE_MT_BUFFERABLE);
 
                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);
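
check_writebuffer_bugs() uses this prot to map the same physical page at two virtual addresses, now requesting the L_PTE_MT_BUFFERABLE type instead of the old L_PTE_BUFFERABLE bit. The actual test is outside this hunk; in outline it writes through one alias and reads back through the other, and a stale read means the write buffer loses updates across aliases. A hedged sketch of that kind of check, with the helper name and exact barrier handling assumed:

    /* Returns nonzero if a write through one alias is not visible
     * through the other, i.e. the write buffer misbehaves. */
    static int ex_check_writebuffer(volatile unsigned long *p1,
                                    volatile unsigned long *p2)
    {
            unsigned long one = 1, zero = 0, val;

            *p1 = one;      /* write via the first alias      */
            *p2 = zero;     /* disturb it via the second one  */
            val = *p1;      /* read back via the first alias  */
            return val != one;
    }
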
@@ -210,7 +219,7 @@ void __init check_writebuffer_bugs(void)
 
        if (v) {
                printk("failed, %s\n", reason);
-               shared_pte_mask |= L_PTE_BUFFERABLE;
+               shared_pte_mask = L_PTE_MT_UNCACHED;
        } else {
                printk("ok\n");
        }
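
Note the stronger fallback: the old code OR'ed L_PTE_BUFFERABLE into shared_pte_mask on failure, while the new code switches the mask outright to L_PTE_MT_UNCACHED, so adjust_pte() will remap shared pages fully uncached rather than uncached-but-bufferable. In terms of the assumed sketch constants above:

    /* Memory type adjust_pte() installs for shared pages. */
    static unsigned long ex_pick_shared_type(int writebuffer_buggy)
    {
            return writebuffer_buggy ? EX_PTE_MT_UNCACHED
                                     : EX_PTE_MT_BUFFERABLE;
    }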