[POWERPC] Use SLB size from the device tree
author Michael Neuling <mikey@neuling.org>
Thu, 6 Dec 2007 06:24:48 +0000 (17:24 +1100)
committer Paul Mackerras <paulus@samba.org>
Tue, 11 Dec 2007 02:45:56 +0000 (13:45 +1100)
Currently we hardwire the number of SLBs to 64, but PAPR says we
should use the ibm,slb-size property to obtain the number of SLB
entries.  This patch uses that property instead of assuming 64.  If
the property is not found, we assume 64 entries as before.
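
For reference, the property the kernel now consumes can also be
inspected from userspace via /proc/device-tree before booting a
patched kernel.  The snippet below is only an illustrative sketch: the
cpu node name used here, "PowerPC,POWER6@0", is a made-up example that
differs from machine to machine, and the property value is a single
big-endian cell.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl(): device-tree cells are big-endian */

int main(void)
{
	/* Illustrative path; the cpu node name varies by machine. */
	const char *path =
		"/proc/device-tree/cpus/PowerPC,POWER6@0/ibm,slb-size";
	FILE *f = fopen(path, "rb");
	uint32_t cell;

	if (f == NULL) {
		puts("no ibm,slb-size property; kernel falls back to 64 entries");
		return 0;
	}
	if (fread(&cell, sizeof(cell), 1, f) == 1)
		printf("ibm,slb-size = %u SLB entries\n", ntohl(cell));
	fclose(f);
	return 0;
}

If fopen() fails, the firmware simply does not export the property,
which is exactly the case where the kernel keeps its 64-entry default.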

This soft patches the SLB handler, so it shouldn't change performance
at all.
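
patch_slb_encoding() is an existing helper in slb.c and its body is
not part of this diff; conceptually the soft patch amounts to the
sketch below.  It assumes the target instruction was assembled with a
zero immediate (the new "cmpldi r10,0" in slb_low.S), so the detected
size can be OR-ed straight into the 16-bit immediate field.
patch_imm16() is a made-up name; flush_icache_range() is the kernel's
usual helper for making code modifications visible to instruction
fetch.

#include <asm/cacheflush.h>	/* flush_icache_range() */

/*
 * Minimal sketch of the soft-patch idea (not the kernel's actual
 * patch_slb_encoding()): the instruction starts with a zero immediate,
 * so the SLB size can simply be OR-ed into the low 16-bit field, then
 * the icache is flushed so the updated instruction is fetched on the
 * next SLB miss.
 */
static void patch_imm16(unsigned int *insn_addr, unsigned int immed)
{
	*insn_addr |= (immed & 0xffff);	/* fill the 16-bit immediate */
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + sizeof(*insn_addr));
}

Because the rewrite happens once in slb_initialize(), every subsequent
SLB miss compares the round-robin index against the machine's real SLB
size at no extra cost, which is why the change is performance-neutral.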

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/powerpc/kernel/prom.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/slb.c
arch/powerpc/mm/slb_low.S
arch/powerpc/platforms/pasemi/setup.c
arch/powerpc/xmon/xmon.c
include/asm-powerpc/mmu-hash64.h
include/asm-powerpc/reg.h

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index acc0d247d3c3a83920c92cb11f33639a86e8da1b..6c2d8836f77dcf9b87c80db13b738449ca1bcdeb 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -583,6 +583,20 @@ static void __init check_cpu_pa_features(unsigned long node)
                      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
 }
 
+#ifdef CONFIG_PPC64
+static void __init check_cpu_slb_size(unsigned long node)
+{
+       u32 *slb_size_ptr;
+
+       slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+       if (slb_size_ptr != NULL) {
+               mmu_slb_size = *slb_size_ptr;
+       }
+}
+#else
+#define check_cpu_slb_size(node) do { } while(0)
+#endif
+
 static struct feature_property {
        const char *name;
        u32 min_value;
@@ -713,6 +727,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 
        check_cpu_feature_properties(node);
        check_cpu_pa_features(node);
+       check_cpu_slb_size(node);
 
 #ifdef CONFIG_PPC_PSERIES
        if (nthreads > 1)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f09730bf3a335bc20de567bd5e596b727317faca..cbbd8b0bc8f4070d37243491be3e3a0025be88d6 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -96,6 +96,7 @@ int mmu_vmalloc_psize = MMU_PAGE_4K;
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
+u16 mmu_slb_size = 64;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 27922dff8b94e34614890f7764ce10c014b6fb65..3cf0802cd2b6b23078dc3d12aeb254480a563771 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -256,6 +256,7 @@ void slb_initialize(void)
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
+       extern unsigned int *slb_compare_rr_to_size;
 
        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -269,6 +270,8 @@ void slb_initialize(void)
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);
+               patch_slb_encoding(slb_compare_rr_to_size,
+                                  mmu_slb_size);
 
                DBG("SLB: linear  LLP = %04x\n", linear_llp);
                DBG("SLB: io      LLP = %04x\n", io_llp);
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1328a81a84aa16d46bb11cb9ba207a6848354f07..657f6b37e9df58892fe4ece4a2a02eef51fe7d44 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -227,8 +227,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 
 7:     ld      r10,PACASTABRR(r13)
        addi    r10,r10,1
-       /* use a cpu feature mask if we ever change our slb size */
-       cmpldi  r10,SLB_NUM_ENTRIES
+       /* This gets soft patched on boot. */
+_GLOBAL(slb_compare_rr_to_size)
+       cmpldi  r10,0
 
        blt+    4f
        li      r10,SLB_NUM_BOLTED
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 5748194a667fcffa4f5d67c7eb65b987cd5e925f..6d7d068ceba099be37ea5ec6c2542a2838991ffc 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -36,6 +36,7 @@
 #include <asm/mpic.h>
 #include <asm/smp.h>
 #include <asm/time.h>
+#include <asm/mmu.h>
 
 #include <pcmcia/ss.h>
 #include <pcmcia/cistpl.h>
@@ -302,7 +303,7 @@ static int pas_machine_check_handler(struct pt_regs *regs)
                int i;
 
                printk(KERN_ERR "slb contents:\n");
-               for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+               for (i = 0; i < mmu_slb_size; i++) {
                        asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
                        asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
                        printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 381d467cf55b80bc45963167cdd2c14105e16bea..c60d123e9f1f88dba595c6e29a90a6e17bd2c12b 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2543,7 +2543,7 @@ static void dump_slb(void)
 
        printf("SLB contents of cpu %x\n", smp_processor_id());
 
-       for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+       for (i = 0; i < mmu_slb_size; i++) {
                asm volatile("slbmfee  %0,%1" : "=r" (tmp) : "r" (i));
                printf("%02d %016lx ", i, tmp);
 
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 82328dec2b527d2ecdd05b0694ca2d8562c0558d..12e5e773c67e765b6c1efbc1bae9e2a44fba8641 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -180,6 +180,7 @@ extern int mmu_vmalloc_psize;
 extern int mmu_io_psize;
 extern int mmu_kernel_ssize;
 extern int mmu_highuser_ssize;
+extern u16 mmu_slb_size;
 
 /*
  * If the processor supports 64k normal pages but not 64k cache
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index e775ff1ca413e96ff5cf8b926f1b08c6105f58bf..1f685047c6ffe80c983a5bff36e53777206ff7bb 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
 #define PV_BE          0x0070
 #define PV_PA6T                0x0090
 
-/*
- * Number of entries in the SLB. If this ever changes we should handle
- * it with a use a cpu feature fixup.
- */
-#define SLB_NUM_ENTRIES 64
-
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
 #define mfmsr()                ({unsigned long rval; \