sh: Preparation for uncached jumps through PMB.
author      Stuart Menefy <stuart.menefy@st.com>
            Fri, 30 Nov 2007 08:06:36 +0000 (17:06 +0900)
committer   Paul Mundt <lethal@linux-sh.org>
            Mon, 28 Jan 2008 04:18:59 +0000 (13:18 +0900)
Presently most of the 29-bit physical parts do P1/P2 segmentation
with a 1:1 cached/uncached mapping, jumping between the two to
control the caching behaviour. This provides the basic infrastructure
to maintain this behaviour on 32-bit physical parts that don't map
P1/P2 at all, using a shiny new linker section and corresponding
fixmap entry.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
14 files changed:
arch/sh/kernel/cpu/init.c
arch/sh/kernel/cpu/sh3/probe.c
arch/sh/kernel/vmlinux_32.lds.S
arch/sh/mm/cache-debugfs.c
arch/sh/mm/cache-sh4.c
arch/sh/mm/cache-sh7705.c
arch/sh/mm/init.c
arch/sh/mm/pmb.c
arch/sh/mm/tlb-sh4.c
include/asm-sh/fixmap.h
include/asm-sh/sections.h
include/asm-sh/system.h
include/asm-sh/system_32.h
include/asm-sh/system_64.h

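The pattern every caller below is converted to looks like the sketch that follows: the function is tagged __uses_jump_to_uncached so the linker places it in the new .uncached.text section, and the cache/TLB register accesses are bracketed by jump_to_uncached()/back_to_cached() instead of the old jump_to_P2()/back_to_P1(). The function name and the particular CCR manipulation are illustrative only, and the include list is approximate.

#include <linux/irqflags.h>     /* local_irq_save()/local_irq_restore() */
#include <asm/system.h>         /* jump_to_uncached(), back_to_cached() */
#include <asm/io.h>             /* ctrl_inl(), ctrl_outl() */
#include <asm/cache.h>          /* CCR, via the CPU-specific cache header */

/* Illustrative only: mirrors what cache_init() and flush_icache_all()
 * do after this patch. */
static void __uses_jump_to_uncached frob_cache_bits(unsigned long set)
{
        unsigned long ccr, flags;

        local_irq_save(flags);
        jump_to_uncached();             /* continue from the uncached alias */

        ccr = ctrl_inl(CCR);
        ctrl_outl(ccr | set, CCR);      /* safe: not executing through the cache */

        back_to_cached();               /* provides the required barrier */
        local_irq_restore(flags);
}
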
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index fd1688e6c61c392b8a9747d5add2db766013c938..0f0c76a842e43e3922510ad3d50869b23a507269 100644
@@ -64,11 +64,11 @@ static void __init speculative_execution_init(void)
  * Generic first-level cache init
  */
 #ifdef CONFIG_SUPERH32
-static void __init cache_init(void)
+static void __uses_jump_to_uncached cache_init(void)
 {
        unsigned long ccr, flags;
 
-       jump_to_P2();
+       jump_to_uncached();
        ccr = ctrl_inl(CCR);
 
        /*
@@ -145,7 +145,7 @@ static void __init cache_init(void)
 #endif
 
        ctrl_outl(flags, CCR);
-       back_to_P1();
+       back_to_cached();
 }
 #else
 #define cache_init()   do { } while (0)
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index bf579e061e097a638f1aa03a3842cc118f578033..22070e43e34d6e8801d311e435178e42fd67c6fb 100644
 #include <asm/cache.h>
 #include <asm/io.h>
 
-int __init detect_cpu_and_cache_system(void)
+int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
 {
        unsigned long addr0, addr1, data0, data1, data2, data3;
 
-       jump_to_P2();
+       jump_to_uncached();
        /*
         * Check if the entry shadows or not.
         * When shadowed, it's 128-entry system.
@@ -48,7 +48,7 @@ int __init detect_cpu_and_cache_system(void)
        ctrl_outl(data0&~SH_CACHE_VALID, addr0);
        ctrl_outl(data2&~SH_CACHE_VALID, addr1);
 
-       back_to_P1();
+       back_to_cached();
 
        boot_cpu_data.dcache.ways               = 4;
        boot_cpu_data.dcache.entry_shift        = 4;
diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
index 0956fb3681a316dfa4cbc538326fff5c35724903..50c69c18dced77a3473c3d006d0fd6a065932741 100644
@@ -43,6 +43,15 @@ SECTIONS
        NOTES
        RO_DATA(PAGE_SIZE)
 
+       /*
+        * Code which must be executed uncached and the associated data
+        */
+       . = ALIGN(PAGE_SIZE);
+       __uncached_start = .;
+       .uncached.text : { *(.uncached.text) }
+       .uncached.data : { *(.uncached.data) }
+       __uncached_end = .;
+
        . = ALIGN(THREAD_SIZE);
        .data : {                       /* Data */
                *(.data.init_task)
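
The uncached region added above is delimited by __uncached_start/__uncached_end, which the sections.h hunk further down exports, and paging_init() maps its start through the new FIX_UNCACHED fixmap entry. A minimal sketch of consuming those symbols; the helper name is made up for illustration:

#include <asm/sections.h>       /* __uncached_start, __uncached_end */

/* Size of the page-aligned block carrying .uncached.text/.uncached.data.
 * Illustrative only; the patch itself maps just its start through a
 * single fixmap page. */
static unsigned long uncached_section_size(void)
{
        return (unsigned long)&__uncached_end -
               (unsigned long)&__uncached_start;
}
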
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index de6d2c9aa4773c1b1b989b66fd2e764524c8c03d..db6d950b6f5e2f768204982a4b2e2cc015429a68 100644
@@ -22,7 +22,8 @@ enum cache_type {
        CACHE_TYPE_UNIFIED,
 };
 
-static int cache_seq_show(struct seq_file *file, void *iter)
+static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
+                                                 void *iter)
 {
        unsigned int cache_type = (unsigned int)file->private;
        struct cache_info *cache;
@@ -34,11 +35,11 @@ static int cache_seq_show(struct seq_file *file, void *iter)
         * Go uncached immediately so we don't skew the results any
         * more than we already are..
         */
-       jump_to_P2();
+       jump_to_uncached();
 
        ccr = ctrl_inl(CCR);
        if ((ccr & CCR_CACHE_ENABLE) == 0) {
-               back_to_P1();
+               back_to_cached();
 
                seq_printf(file, "disabled\n");
                return 0;
@@ -104,7 +105,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
                addrstart += cache->way_incr;
        }
 
-       back_to_P1();
+       back_to_cached();
 
        return 0;
 }
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 226b190c5b9c29f045fdb5041ff6201c4263c493..43d7ff6b6ec7c71cfbaccc244d46bc6bcf0a7ce7 100644
@@ -190,7 +190,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
  * .. which happens to be the same behavior as flush_icache_range().
  * So, we simply flush out a line.
  */
-void flush_cache_sigtramp(unsigned long addr)
+void __uses_jump_to_uncached flush_cache_sigtramp(unsigned long addr)
 {
        unsigned long v, index;
        unsigned long flags;
@@ -205,13 +205,13 @@ void flush_cache_sigtramp(unsigned long addr)
                        (v & boot_cpu_data.icache.entry_mask);
 
        local_irq_save(flags);
-       jump_to_P2();
+       jump_to_uncached();
 
        for (i = 0; i < boot_cpu_data.icache.ways;
             i++, index += boot_cpu_data.icache.way_incr)
                ctrl_outl(0, index);    /* Clear out Valid-bit */
 
-       back_to_P1();
+       back_to_cached();
        wmb();
        local_irq_restore(flags);
 }
@@ -256,12 +256,12 @@ void flush_dcache_page(struct page *page)
 }
 
 /* TODO: Selective icache invalidation through IC address array.. */
-static inline void flush_icache_all(void)
+static inline void __uses_jump_to_uncached flush_icache_all(void)
 {
        unsigned long flags, ccr;
 
        local_irq_save(flags);
-       jump_to_P2();
+       jump_to_uncached();
 
        /* Flush I-cache */
        ccr = ctrl_inl(CCR);
@@ -269,11 +269,11 @@ static inline void flush_icache_all(void)
        ctrl_outl(ccr, CCR);
 
        /*
-        * back_to_P1() will take care of the barrier for us, don't add
+        * back_to_cached() will take care of the barrier for us, don't add
         * another one!
         */
 
-       back_to_P1();
+       back_to_cached();
        local_irq_restore(flags);
 }
 
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 4896d737692616146f497492b8a399ac1ef7ae3d..22dacc7788236c96162b9df9e6d3483b04c9a67b 100644
@@ -71,7 +71,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 /*
  * Writeback&Invalidate the D-cache of the page
  */
-static void __flush_dcache_page(unsigned long phys)
+static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
 {
        unsigned long ways, waysize, addrstart;
        unsigned long flags;
@@ -92,7 +92,7 @@ static void __flush_dcache_page(unsigned long phys)
         * possible.
         */
        local_irq_save(flags);
-       jump_to_P2();
+       jump_to_uncached();
 
        ways = current_cpu_data.dcache.ways;
        waysize = current_cpu_data.dcache.sets;
@@ -118,7 +118,7 @@ static void __flush_dcache_page(unsigned long phys)
                addrstart += current_cpu_data.dcache.way_incr;
        } while (--ways);
 
-       back_to_P1();
+       back_to_cached();
        local_irq_restore(flags);
 }
 
@@ -132,15 +132,15 @@ void flush_dcache_page(struct page *page)
                __flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
-void flush_cache_all(void)
+void __uses_jump_to_uncached flush_cache_all(void)
 {
        unsigned long flags;
 
        local_irq_save(flags);
-       jump_to_P2();
+       jump_to_uncached();
 
        cache_wback_all();
-       back_to_P1();
+       back_to_cached();
        local_irq_restore(flags);
 }
 
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 79c309780f953a16bf65c621aae490ffe10e9c8d..094225e0d722d2dcd57f1c34abbbf948deff3e33 100644
@@ -23,6 +23,7 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
+unsigned long cached_to_uncached = 0;
 
 void show_mem(void)
 {
@@ -99,7 +100,8 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 
-       flush_tlb_one(get_asid(), addr);
+       if (cached_to_uncached)
+               flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -164,6 +166,18 @@ void __init paging_init(void)
        }
 
        free_area_init_nodes(max_zone_pfns);
+
+       /* Set up the uncached fixmap */
+       set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
+
+#ifdef CONFIG_29BIT
+       /*
+        * Handle trivial transitions between cached and uncached
+        * segments, making use of the 1:1 mapping relationship in
+        * 512MB lowmem.
+        */
+       cached_to_uncached = P2SEG - P1SEG;
+#endif
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
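
On 29-bit parts the new cached_to_uncached variable is simply the fixed distance between the segments: P2SEG (0xa0000000) minus P1SEG (0x80000000), i.e. 0x20000000, the same constant the old jump_to_P2() hard-coded. A hedged sketch of what the offset means for an individual address; the helper is illustrative and not part of the patch:

#include <asm/addrspace.h>      /* P1SEG, P2SEG */

extern unsigned long cached_to_uncached;

/* Illustrative: adding cached_to_uncached to a cached P1 address gives
 * the uncached P2 alias of the same physical location (29-bit case). */
static inline unsigned long uncached_alias(unsigned long cached_addr)
{
        return cached_addr + cached_to_uncached;
}
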
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index ef6ab39eaf656fd7ae410d317268b27b85e926b3..ab81c602295f063906c6de58b398325b2ccd49cc 100644
@@ -163,18 +163,18 @@ repeat:
        return 0;
 }
 
-int set_pmb_entry(struct pmb_entry *pmbe)
+int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
 {
        int ret;
 
-       jump_to_P2();
+       jump_to_uncached();
        ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
-       back_to_P1();
+       back_to_cached();
 
        return ret;
 }
 
-void clear_pmb_entry(struct pmb_entry *pmbe)
+void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 {
        unsigned int entry = pmbe->entry;
        unsigned long addr;
@@ -188,7 +188,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
                     entry >= NR_PMB_ENTRIES))
                return;
 
-       jump_to_P2();
+       jump_to_uncached();
 
        /* Clear V-bit */
        addr = mk_pmb_addr(entry);
@@ -197,7 +197,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
        addr = mk_pmb_data(entry);
        ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
 
-       back_to_P1();
+       back_to_cached();
 
        clear_bit(entry, &pmb_map);
 }
@@ -302,7 +302,7 @@ static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
        pmbe->entry = PMB_NO_ENTRY;
 }
 
-static int __init pmb_init(void)
+static int __uses_jump_to_uncached pmb_init(void)
 {
        unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
        unsigned int entry, i;
@@ -312,7 +312,7 @@ static int __init pmb_init(void)
        pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
                                      SLAB_PANIC, pmb_cache_ctor);
 
-       jump_to_P2();
+       jump_to_uncached();
 
        /*
         * Ordering is important, P2 must be mapped in the PMB before we
@@ -335,7 +335,7 @@ static int __init pmb_init(void)
        i |= MMUCR_TI;
        ctrl_outl(i, MMUCR);
 
-       back_to_P1();
+       back_to_cached();
 
        return 0;
 }
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 2d1dd6044307e1d3b6049b74708c4e7e00aca7f9..f0c7b7397fa655804e59f161ffbf48ad37a801fd 100644
@@ -79,7 +79,8 @@ void update_mmu_cache(struct vm_area_struct * vma,
        local_irq_restore(flags);
 }
 
-void local_flush_tlb_one(unsigned long asid, unsigned long page)
+void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
+                                                unsigned long page)
 {
        unsigned long addr, data;
 
@@ -91,7 +92,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
         */
        addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
        data = page | asid; /* VALID bit is off */
-       jump_to_P2();
+       jump_to_uncached();
        ctrl_outl(data, addr);
-       back_to_P1();
+       back_to_cached();
 }
diff --git a/include/asm-sh/fixmap.h b/include/asm-sh/fixmap.h
index 09463cd9bbb9389e27dbcb12ce72ac0c8370257e..721fcc4d5e98ed20a90d7ee187d17ada4944ee83 100644
@@ -49,6 +49,7 @@ enum fixed_addresses {
 #define FIX_N_COLOURS 16
        FIX_CMAP_BEGIN,
        FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+       FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
        FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
        FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
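
FIX_UNCACHED is the slot that paging_init() above points at __pa(&__uncached_start) with set_fixmap_nocache(). Note that this patch leaves cached_to_uncached at 0 on 32-bit parts, so the guarded flush_tlb_one() call is skipped there; a plausible follow-up (not in this commit, purely an assumption) would derive the offset from the fixmap address, roughly:

/* Hypothetical 32-bit follow-up, NOT part of this patch: the cached ->
 * uncached distance becomes the gap between the uncached fixmap and
 * the start of the uncached section. */
cached_to_uncached = fix_to_virt(FIX_UNCACHED) -
                     (unsigned long)&__uncached_start;
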
diff --git a/include/asm-sh/sections.h b/include/asm-sh/sections.h
index bd9cbc967c2ab0548b64d080825fd27d4708b3df..8f8f4ad400dfd5a5b6d05d592abd96258810a3be 100644
@@ -4,6 +4,7 @@
 #include <asm-generic/sections.h>
 
 extern long __machvec_start, __machvec_end;
+extern char __uncached_start, __uncached_end;
 extern char _ebss[];
 
 #endif /* __ASM_SH_SECTIONS_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 969f3d4afe2ac2ee8b238cefad27824eeb0c777a..9bda8d063ecf70e3c9b7e95f957ec904c03523ea 100644
@@ -144,6 +144,8 @@ extern unsigned int instruction_size(unsigned int insn);
 #define instruction_size(insn) (4)
 #endif
 
+extern unsigned long cached_to_uncached;
+
 /* XXX
  * disable hlt during certain critical i/o operations
  */
diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h
index ad37e8d5f31e1605b6f14da2d6bf1cb94bfb0230..e918bacd5ecf1aa56034e99b9e386748a387f425 100644
@@ -58,29 +58,31 @@ do {                                                                \
        last = __last;                                          \
 } while (0)
 
+#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text")))
+
 /*
- * Jump to P2 area.
- * When handling TLB or caches, we need to do it from P2 area.
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
  */
-#define jump_to_P2()                   \
-do {                                   \
-       unsigned long __dummy;          \
-       __asm__ __volatile__(           \
-               "mov.l  1f, %0\n\t"     \
-               "or     %1, %0\n\t"     \
-               "jmp    @%0\n\t"        \
-               " nop\n\t"              \
-               ".balign 4\n"           \
-               "1:     .long 2f\n"     \
-               "2:"                    \
-               : "=&r" (__dummy)       \
-               : "r" (0x20000000));    \
+#define jump_to_uncached()                     \
+do {                                           \
+       unsigned long __dummy;                  \
+                                               \
+       __asm__ __volatile__(                   \
+               "mova   1f, %0\n\t"             \
+               "add    %1, %0\n\t"             \
+               "jmp    @%0\n\t"                \
+               " nop\n\t"                      \
+               ".balign 4\n"                   \
+               "1:"                            \
+               : "=&z" (__dummy)               \
+               : "r" (cached_to_uncached));    \
 } while (0)
 
 /*
- * Back to P1 area.
+ * Back to cached area.
  */
-#define back_to_P1()                                   \
+#define back_to_cached()                               \
 do {                                                   \
        unsigned long __dummy;                          \
        ctrl_barrier();                                 \
diff --git a/include/asm-sh/system_64.h b/include/asm-sh/system_64.h
index 0e466e991f7dcd959170432a8cddbe14a4f3e613..943acf5ea07c3a1056f847f969da98a3219e908a 100644
@@ -32,8 +32,9 @@ do {                                                          \
                              &next->thread);                   \
 } while (0)
 
-/* No segmentation.. */
-#define jump_to_P2()   do { } while (0)
-#define back_to_P1()   do { } while (0)
+#define __uses_jump_to_uncached
+
+#define jump_to_uncached()     do { } while (0)
+#define back_to_cached()       do { } while (0)
 
 #endif /* __ASM_SH_SYSTEM_64_H */