BLANK();
   DEFINE(PROC_INFO_SZ,         sizeof(struct proc_info_list));
   DEFINE(PROCINFO_INITFUNC,    offsetof(struct proc_info_list, __cpu_flush));
-  DEFINE(PROCINFO_MMUFLAGS,    offsetof(struct proc_info_list, __cpu_mmu_flags));
+  DEFINE(PROCINFO_MM_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
+  DEFINE(PROCINFO_IO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_io_mmu_flags));
   return 0; 
 }
 
        teq     r0, r6
        bne     1b
 
-       ldr     r7, [r10, #PROCINFO_MMUFLAGS]   @ mmuflags
+       ldr     r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
 
        /*
         * Create identity mapping for first MB of kernel to
 #endif
 
 #ifdef CONFIG_DEBUG_LL
-       bic     r7, r7, #0x0c                   @ turn off cacheable
-                                               @ and bufferable bits
+       ldr     r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
        /*
         * Map in IO space for serial debugging.
         * This allows debug messages to be output
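(Aside, not part of the patch: for the pre-v6 parts the new io_mmuflags word is intended to encode exactly what the old `bic r7, r7, #0x0c` derived from the mm flags. A quick stand-alone check, with PMD_* values mirroring the kernel's definitions and the flag words mirroring an ARM920-class proc_info entry further down:)

#include <assert.h>

#define PMD_TYPE_SECT		(2 << 0)
#define PMD_SECT_BUFFERABLE	(1 << 2)
#define PMD_SECT_CACHEABLE	(1 << 3)
#define PMD_BIT4		(1 << 4)
#define PMD_SECT_AP_WRITE	(1 << 10)
#define PMD_SECT_AP_READ	(1 << 11)

int main(void)
{
	unsigned long mm = PMD_TYPE_SECT | PMD_SECT_BUFFERABLE |
			   PMD_SECT_CACHEABLE | PMD_BIT4 |
			   PMD_SECT_AP_WRITE | PMD_SECT_AP_READ;
	unsigned long io = PMD_TYPE_SECT | PMD_BIT4 |
			   PMD_SECT_AP_WRITE | PMD_SECT_AP_READ;

	/* old head.S: bic r7, r7, #0x0c (clear cacheable/bufferable) */
	assert((mm & ~0x0cUL) == io);
	return 0;
}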
 
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
-               .prot_sect = PMD_TYPE_SECT,
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
-               .prot_sect = PMD_TYPE_SECT,
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
                                PMD_SECT_TEX(1),
                .domain    = DOMAIN_IO,
        },
        [MT_NONSHARED_DEVICE] = {
                .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV |
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        }
                ecc_mask = 0;
        }
 
-       if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) {
-               for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+       /*
+        * Xscale must not have PMD bit 4 set for section mappings.
+        */
+       if (cpu_is_xscale())
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+                       mem_types[i].prot_sect &= ~PMD_BIT4;
+
+       /*
+        * On ARMv5 and lower, excluding Xscale, bit 4 must be set
+        * for page tables.
+        */
+       if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
-                       if (mem_types[i].prot_sect)
-                               mem_types[i].prot_sect |= PMD_BIT4;
-               }
-       }
 
        cp = &cache_policies[cachepolicy];
        kern_pgprot = user_pgprot = cp->pte;
                 * bit 4 becomes XN which we must clear for the
                 * kernel memory mapping.
                 */
-               mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
-               mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+               mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
+               mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
 
                /*
                 * Mark cache clean areas and XIP ROM read only
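(Aside, not part of the patch: the net effect of the two fixup loops above, for one section descriptor and one level-1 table descriptor, can be sketched stand-alone. PMD_* values mirror the kernel's definitions; fixup() and the flags chosen are illustrative only:)

#include <assert.h>

#define PMD_TYPE_TABLE		(1 << 0)
#define PMD_TYPE_SECT		(2 << 0)
#define PMD_BIT4		(1 << 4)
#define PMD_SECT_AP_WRITE	(1 << 10)

static void fixup(unsigned long *prot_l1, unsigned long *prot_sect,
		  int is_xscale, int is_pre_v6)
{
	/* Xscale must not have PMD bit 4 set for section mappings. */
	if (is_xscale)
		*prot_sect &= ~PMD_BIT4;

	/* On ARMv5 and lower, excluding Xscale, set bit 4 in page tables. */
	if (is_pre_v6 && !is_xscale)
		*prot_l1 |= PMD_BIT4;
}

int main(void)
{
	unsigned long l1 = PMD_TYPE_TABLE;
	unsigned long sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE;

	fixup(&l1, &sect, 1, 1);		/* Xscale core */
	assert(!(sect & PMD_BIT4));		/* section loses bit 4 */
	assert(!(l1 & PMD_BIT4));		/* table entry never gains it */
	return 0;
}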
 
 __arm1020_proc_info:
        .long   0x4104a200                      @ ARM 1020T (Architecture v5T)
        .long   0xff0ffff0
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
 
 __arm1020e_proc_info:
        .long   0x4105a200                      @ ARM 1020TE (Architecture v5TE)
        .long   0xff0ffff0
+       .long   PMD_TYPE_SECT | \
+               PMD_BIT4 | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
 
 __arm1022_proc_info:
        .long   0x4105a220                      @ ARM 1022E (v5TE)
        .long   0xff0ffff0
+       .long   PMD_TYPE_SECT | \
+               PMD_BIT4 | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
 
 __arm1026_proc_info:
        .long   0x4106a260                      @ ARM 1026EJ-S (v5TEJ)
        .long   0xff0ffff0
+       .long   PMD_TYPE_SECT | \
+               PMD_BIT4 | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
 
                .long   0x41560600
                .long   0xfffffff0
                .long   0x00000c1e
+               .long   PMD_TYPE_SECT | \
+                       PMD_BIT4 | \
+                       PMD_SECT_AP_WRITE | \
+                       PMD_SECT_AP_READ
                b       __arm6_setup
                .long   cpu_arch_name
                .long   cpu_elf_name
                .long   0x41560610
                .long   0xfffffff0
                .long   0x00000c1e
+               .long   PMD_TYPE_SECT | \
+                       PMD_BIT4 | \
+                       PMD_SECT_AP_WRITE | \
+                       PMD_SECT_AP_READ
                b       __arm6_setup
                .long   cpu_arch_name
                .long   cpu_elf_name
                .long   0x41007000
                .long   0xffffff00
                .long   0x00000c1e
+               .long   PMD_TYPE_SECT | \
+                       PMD_BIT4 | \
+                       PMD_SECT_AP_WRITE | \
+                       PMD_SECT_AP_READ
                b       __arm7_setup
                .long   cpu_arch_name
                .long   cpu_elf_name
                        PMD_BIT4 | \
                        PMD_SECT_AP_WRITE | \
                        PMD_SECT_AP_READ
+               .long   PMD_TYPE_SECT | \
+                       PMD_BIT4 | \
+                       PMD_SECT_AP_WRITE | \
+                       PMD_SECT_AP_READ
                b       __arm7_setup
                .long   cpu_arch_name
                .long   cpu_elf_name
 
                        PMD_BIT4 | \
                        PMD_SECT_AP_WRITE | \
                        PMD_SECT_AP_READ
+               .long   PMD_TYPE_SECT | \
+                       PMD_BIT4 | \
+                       PMD_SECT_AP_WRITE | \
+                       PMD_SECT_AP_READ
                b       __arm710_setup                          @ cpu_flush
                .long   cpu_arch_name                           @ arch_name
                .long   cpu_elf_name                            @ elf_name
                        PMD_BIT4 | \
                        PMD_SECT_AP_WRITE | \
                        PMD_SECT_AP_READ
+               .long   PMD_TYPE_SECT | \
+                       PMD_BIT4 | \
+                       PMD_SECT_AP_WRITE | \
+                       PMD_SECT_AP_READ
                b       __arm720_setup                          @ cpu_flush
                .long   cpu_arch_name                           @ arch_name
                .long   cpu_elf_name                            @ elf_name
 
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_BIT4 | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __arm920_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
 
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_BIT4 | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __arm922_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
 
 __arm925_proc_info:
        .long   0x54029250
        .long   0xfffffff0
+       .long   PMD_TYPE_SECT | \
+               PMD_BIT4 | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
 __arm915_proc_info:
        .long   0x54029150
        .long   0xfffffff0
+       .long   PMD_TYPE_SECT | \
+               PMD_BIT4 | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
 
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_BIT4 | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __arm926_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
 
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __sa110_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
 
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __sa1100_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __sa1100_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
 
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_XN | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __v6_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
 
 __xsc3_proc_info:
        .long   0x69056000
        .long   0xffffe000
-       .long   0x00000c0e
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_BUFFERABLE | \
+               PMD_SECT_CACHEABLE | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xsc3_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
 
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
 __ixp46x_proc_info:
        .long   0x69054200
        .long   0xffffff00
-       .long   0x00000c0e
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_BUFFERABLE | \
+               PMD_SECT_CACHEABLE | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
        b       __xscale_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
 
  */
 #define PMD_SECT_BUFFERABLE    (1 << 2)
 #define PMD_SECT_CACHEABLE     (1 << 3)
+#define PMD_SECT_XN            (1 << 4)        /* v6 */
 #define PMD_SECT_AP_WRITE      (1 << 10)
 #define PMD_SECT_AP_READ       (1 << 11)
 #define PMD_SECT_TEX(x)                ((x) << 12)     /* v5 */
 
 struct proc_info_list {
        unsigned int            cpu_val;
        unsigned int            cpu_mask;
-       unsigned long           __cpu_mmu_flags;        /* used by head.S */
+       unsigned long           __cpu_mm_mmu_flags;     /* used by head.S */
+       unsigned long           __cpu_io_mmu_flags;     /* used by head.S */
        unsigned long           __cpu_flush;            /* used by head.S */
        const char              *arch_name;
        const char              *elf_name;
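(Aside, not part of the patch: a minimal sketch of how the reordered fields line up with the PROCINFO_* constants emitted by asm-offsets.c. The struct below is a trimmed copy of the declaration above, for illustration only.)

#include <stddef.h>
#include <stdio.h>

/* Trimmed copy of struct proc_info_list, illustration only. */
struct proc_info_list {
	unsigned int	cpu_val;
	unsigned int	cpu_mask;
	unsigned long	__cpu_mm_mmu_flags;	/* used by head.S */
	unsigned long	__cpu_io_mmu_flags;	/* used by head.S */
	unsigned long	__cpu_flush;		/* used by head.S */
	const char	*arch_name;
	const char	*elf_name;
};

int main(void)
{
	/* head.S loads r7 from each mmuflags offset in turn. */
	printf("PROCINFO_MM_MMUFLAGS = %zu\n",
	       offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
	printf("PROCINFO_IO_MMUFLAGS = %zu\n",
	       offsetof(struct proc_info_list, __cpu_io_mmu_flags));
	printf("PROCINFO_INITFUNC    = %zu\n",
	       offsetof(struct proc_info_list, __cpu_flush));
	return 0;
}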