[ARM] 5422/1: ARM: MMU: add a Non-cacheable Normal executable memory type
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c0b9a78d7b87eb2c38622656cad3a933bce535ea..aa424e1da8a1bf2ac8397adaeb223b53626277af 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -17,6 +17,7 @@
 
 #include <asm/cputype.h>
 #include <asm/mach-types.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
@@ -242,6 +243,10 @@ static struct mem_type mem_types[] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
+       [MT_MEMORY_NONCACHED] = {
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .domain    = DOMAIN_KERNEL,
+       },
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
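
The new mem_types[] entry above supplies only the descriptor template;
the generation-specific cacheability bits are filled in by
build_mem_type_table() below.  Once present, MT_MEMORY_NONCACHED is
selectable through the usual static-mapping path.  A minimal sketch of
how a board file might request such a mapping (the SRAM addresses and
the board_* names are illustrative placeholders, not part of this
patch):

    /* Hypothetical board support code; base addresses are made up. */
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <asm/sizes.h>
    #include <asm/mach/map.h>

    #define BOARD_SRAM_PHYS	0x40200000	/* assumed physical base */
    #define BOARD_SRAM_VIRT	0xfe400000	/* assumed virtual address */

    static struct map_desc board_sram_desc[] __initdata = {
    	{
    		.virtual	= BOARD_SRAM_VIRT,
    		.pfn		= __phys_to_pfn(BOARD_SRAM_PHYS),
    		.length		= SZ_64K,
    		.type		= MT_MEMORY_NONCACHED,
    	},
    };

    static void __init board_map_io(void)
    {
    	iotable_init(board_sram_desc, ARRAY_SIZE(board_sram_desc));
    }
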
@@ -405,9 +410,28 @@ static void __init build_mem_type_table(void)
                kern_pgprot |= L_PTE_SHARED;
                vecs_pgprot |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
        }
 
+       /*
+        * Non-cacheable Normal - intended for memory areas that must
+        * not cause dirty cache line writebacks when used
+        */
+       if (cpu_arch >= CPU_ARCH_ARMv6) {
+               if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+                       /* Non-cacheable Normal is XCB = 001 */
+                       mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+                               PMD_SECT_BUFFERED;
+               } else {
+                       /* For both ARMv6 and non-TEX-remapping ARMv7 */
+                       mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+                               PMD_SECT_TEX(1);
+               }
+       } else {
+               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+       }
+
        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
                protection_map[i] = __pgprot(v | user_pgprot);
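
The three branches above choose the Normal Non-cacheable encoding per
architecture generation: with ARMv7 TEX remap enabled (SCTLR.TRE set),
the TEX[0]/C/B bits form an index and 001 is the slot used for
Non-cacheable Normal, hence PMD_SECT_BUFFERED; on ARMv6 and
non-remapping ARMv7 the classic encoding TEX=001 C=0 B=0 means Normal
Non-cacheable directly; pre-v6 CPUs have no Normal/Device distinction,
so uncached-bufferable is the closest available attribute.  For
reference, the macros involved expand roughly as follows (quoted from
memory of asm/pgtable-hwdef.h of this era; worth re-checking against
the tree):

    #define PMD_SECT_BUFFERABLE	(1 << 2)	/* B bit */
    #define PMD_SECT_CACHEABLE	(1 << 3)	/* C bit */
    #define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE)
    #define PMD_SECT_TEX(x)	((x) << 12)	/* TEX[2:0] field */
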
@@ -730,7 +754,7 @@ static inline void prepare_page_table(void)
 
 #ifdef CONFIG_XIP_KERNEL
        /* The XIP kernel is mapped in the module area -- skip over it */
-       addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+       addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
 #endif
        for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
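
The change from &_etext to _etext (and the matching ones in
reserve_node_zero() below) follows from the <asm/sections.h> include
added at the top of the file: that header declares the linker-script
markers as arrays (extern char _etext[]; and friends), so the bare
symbol name already decays to the address and the address-of operator
is redundant.  A small illustration of the idiom (the function name is
made up):

    /* Linker-symbol-as-array idiom, as used in the hunks above. */
    extern char _etext[];	/* end of kernel text, from the linker script */

    static unsigned long text_end(void)
    {
    	/* An array name decays to a pointer to its first element,
    	 * so no '&' is needed to obtain the symbol's address. */
    	return (unsigned long)_etext;
    }
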
@@ -756,10 +780,10 @@ void __init reserve_node_zero(pg_data_t *pgdat)
         * Note that this can only be in node 0.
         */
 #ifdef CONFIG_XIP_KERNEL
-       reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start,
+       reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
                        BOOTMEM_DEFAULT);
 #else
-       reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext,
+       reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
                        BOOTMEM_DEFAULT);
 #endif
 
@@ -826,7 +850,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         * Allocate the vector page early.
         */
        vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-       BUG_ON(!vectors);
 
        for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
@@ -838,7 +861,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 #ifdef CONFIG_XIP_KERNEL
        map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
        map.virtual = MODULES_VADDR;
-       map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+       map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
        map.type = MT_ROM;
        create_mapping(&map);
 #endif
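
The map.length expression rounds the XIP text span up to whole 1 MiB
sections: ~SECTION_MASK equals SECTION_SIZE - 1, so adding it before
masking is the usual round-up idiom.  A worked example with made-up
numbers:

    /*
     * SECTION_SIZE  = 1 << 20                  (1 MiB)
     * SECTION_MASK  = ~(SECTION_SIZE - 1)      = 0xfff00000
     * ~SECTION_MASK = SECTION_SIZE - 1         = 0x000fffff
     *
     * If _etext - MODULES_VADDR were 0x180000 (1.5 MiB):
     *   (0x180000 + 0xfffff) & 0xfff00000 == 0x200000 (two sections)
     */
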
@@ -911,10 +934,10 @@ void __init paging_init(struct machine_desc *mdesc)
        top_pmd = pmd_off_k(0xffff0000);
 
        /*
-        * allocate the zero page.  Note that we count on this going ok.
+        * allocate the zero page.  Note that this always succeeds and
+        * returns a zeroed result.
         */
        zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
-       memset(zero_page, 0, PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
 }
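
The dropped memset() here, like the BUG_ON() removed in the
devicemaps_init() hunk above, relies on the bootmem allocator's
contract: alloc_bootmem_low_pages() panics on failure instead of
returning NULL, and the pages it returns are already zero-filled.  A
sketch of the resulting caller pattern (use_page() is a hypothetical
consumer):

    #include <linux/bootmem.h>

    static void __init alloc_example(void)
    {
    	/* Bootmem either succeeds or panics, and returns zeroed
    	 * memory, so neither a NULL check nor a memset is needed. */
    	void *page = alloc_bootmem_low_pages(PAGE_SIZE);

    	use_page(page);		/* hypothetical consumer */
    }
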