powerpc: Make the 64-bit kernel a position-independent executable
[linux-2.6-omap-h63xx.git] / arch/powerpc/mm/hash_utils_64.c
index 5ce5a4dcd00823c17447737f18ad5ad03c820185..09db4efe19211d4e97e3212d4447c5e731ee2a49 100644
@@ -151,39 +151,58 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        },
 };
 
+static unsigned long htab_convert_pte_flags(unsigned long pteflags)
+{
+       unsigned long rflags = pteflags & 0x1fa;
+
+       /* _PAGE_EXEC -> NOEXEC */
+       if ((pteflags & _PAGE_EXEC) == 0)
+               rflags |= HPTE_R_N;
+
+       /* PP bits. PAGE_USER is already PP bit 0x2, so we only
+        * need to add in 0x1 if it's a read-only user page
+        */
+       if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
+                                        (pteflags & _PAGE_DIRTY)))
+               rflags |= 1;
+
+       /* Always add C */
+       return rflags | HPTE_R_C;
+}
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-                     unsigned long pstart, unsigned long mode,
+                     unsigned long pstart, unsigned long prot,
                      int psize, int ssize)
 {
        unsigned long vaddr, paddr;
        unsigned int step, shift;
-       unsigned long tmp_mode;
        int ret = 0;
 
        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;
 
+       prot = htab_convert_pte_flags(prot);
+
+       DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
+           vstart, vend, pstart, prot, psize, ssize);
+
        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr, ssize);
                unsigned long va = hpt_va(vaddr, vsid, ssize);
+               unsigned long tprot = prot;
 
-               tmp_mode = mode;
-               
-               /* Make non-kernel text non-executable */
-               if (!in_kernel_text(vaddr))
-                       tmp_mode = mode | HPTE_R_N;
+               /* Make kernel text executable */
+               if (overlaps_kernel_text(vaddr, vaddr + step))
+                       tprot &= ~HPTE_R_N;
 
                hash = hpt_hash(va, shift, ssize);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
-               DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
-
                BUG_ON(!ppc_md.hpte_insert);
-               ret = ppc_md.hpte_insert(hpteg, va, paddr,
-                               tmp_mode, HPTE_V_BOLTED, psize, ssize);
+               ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
+                                        HPTE_V_BOLTED, psize, ssize);
 
                if (ret < 0)
                        break;
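
The hunk above centralises the Linux PTE-flag to HPTE-flag translation in htab_convert_pte_flags(). As a rough illustration of what that helper computes, here is a stand-alone user-space model; the _PAGE_* and HPTE_R_* constants are assumed placeholder values rather than the definitions from this tree, so only the shape of the logic, not the exact numbers, should be read into it.

/*
 * Stand-alone model of the conversion above, for intuition only.
 * NOTE: the bit values below are assumptions for illustration, not
 * the real powerpc definitions from this tree.
 */
#include <stdio.h>

#define _PAGE_USER	0x0002UL	/* assumed: user-accessible page */
#define _PAGE_EXEC	0x0004UL	/* assumed: executable page */
#define _PAGE_DIRTY	0x0080UL	/* assumed: page has been written */
#define _PAGE_RW	0x0100UL	/* assumed: write permitted */
#define HPTE_R_N	0x0004UL	/* assumed: no-execute HPTE bit */
#define HPTE_R_C	0x0080UL	/* assumed: changed (C) HPTE bit */

static unsigned long convert(unsigned long pteflags)
{
	/* the low Linux PTE bits are assumed to line up with the HPTE
	 * R bits, which is what the 0x1fa mask relies on */
	unsigned long rflags = pteflags & 0x1fa;

	/* pages without _PAGE_EXEC are mapped no-execute */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;

	/* _PAGE_USER already supplies PP bit 0x2; add 0x1 for a
	 * read-only user page */
	if ((pteflags & _PAGE_USER) &&
	    !((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
		rflags |= 1;

	/* the C (changed) bit is always set */
	return rflags | HPTE_R_C;
}

int main(void)
{
	printf("kernel rw page -> %#lx\n", convert(_PAGE_RW | _PAGE_DIRTY));
	printf("user ro page   -> %#lx\n", convert(_PAGE_USER));
	return 0;
}

With the real definitions, the helper always produces a no-execute mapping for PAGE_KERNEL, which is why htab_bolt_mapping() above only has to clear HPTE_R_N again for ranges that overlap kernel text.
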
@@ -329,6 +348,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
        return 0;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
 /* Scan for 16G memory blocks that have been set aside for huge pages
  * and reserve those blocks for 16G huge pages.
  */
@@ -366,6 +386,7 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
        add_gpage(phys_addr, block_size, expected_pages);
        return 0;
 }
+#endif /* CONFIG_HUGETLB_PAGE */
 
 static void __init htab_init_page_sizes(void)
 {
@@ -519,9 +540,9 @@ static unsigned long __init htab_get_table_size(void)
 #ifdef CONFIG_MEMORY_HOTPLUG
 void create_section_mapping(unsigned long start, unsigned long end)
 {
-               BUG_ON(htab_bolt_mapping(start, end, __pa(start),
-                       _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
-                       mmu_linear_psize, mmu_kernel_ssize));
+       BUG_ON(htab_bolt_mapping(start, end, __pa(start),
+                                PAGE_KERNEL, mmu_linear_psize,
+                                mmu_kernel_ssize));
 }
 
 int remove_section_mapping(unsigned long start, unsigned long end)
@@ -570,7 +591,7 @@ void __init htab_initialize(void)
 {
        unsigned long table;
        unsigned long pteg_count;
-       unsigned long mode_rw;
+       unsigned long prot;
        unsigned long base = 0, size = 0, limit;
        int i;
 
@@ -628,7 +649,7 @@ void __init htab_initialize(void)
                mtspr(SPRN_SDR1, _SDR1);
        }
 
-       mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
+       prot = PAGE_KERNEL;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
        linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -647,7 +668,8 @@ void __init htab_initialize(void)
                base = (unsigned long)__va(lmb.memory.region[i].base);
                size = lmb.memory.region[i].size;
 
-               DBG("creating mapping for region: %lx : %lx\n", base, size);
+               DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
+                   base, size, prot);
 
 #ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
@@ -664,21 +686,21 @@ void __init htab_initialize(void)
                        unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
-                                                       __pa(base), mode_rw,
+                                                       __pa(base), prot,
                                                        mmu_linear_psize,
                                                        mmu_kernel_ssize));
                        if ((base + size) > dart_table_end)
                                BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
                                                        base + size,
                                                        __pa(dart_table_end),
-                                                        mode_rw,
+                                                        prot,
                                                         mmu_linear_psize,
                                                         mmu_kernel_ssize));
                        continue;
                }
 #endif /* CONFIG_U3_DART */
                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
-                               mode_rw, mmu_linear_psize, mmu_kernel_ssize));
+                               prot, mmu_linear_psize, mmu_kernel_ssize));
        }
 
        /*
@@ -696,7 +718,7 @@ void __init htab_initialize(void)
                        tce_alloc_start = base + size + 1;
 
                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
-                                        __pa(tce_alloc_start), mode_rw,
+                                        __pa(tce_alloc_start), prot,
                                         mmu_linear_psize, mmu_kernel_ssize));
        }
 
@@ -1117,8 +1139,7 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
        unsigned long hash, hpteg;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
-       unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
-               _PAGE_COHERENT | PP_RWXX | HPTE_R_N;
+       unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
        int ret;
 
        hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);