/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches.
 */
static void no_sc_noop(void) {}
static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)no_sc_noop,
	.bc_disable = (void *)no_sc_noop,
	.bc_wback_inv = (void *)no_sc_noop,
	.bc_inv = (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
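/*
 * Masking out the low four PRID bits drops the minor revision number, so
 * the two tests above match any 1.x respectively 2.x stepping of the R4600.
 */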
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
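/*
 * My reading of the workaround above: on R4600 v2.x the uncached load
 * from CKSEG1 forces pending stores out of the write buffer before a Hit
 * cache op is issued, while on v1.x a short nop delay is sufficient; the
 * erratum selectors themselves live in <asm/war.h>.
 */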
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}
static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}
static void (* r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
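/*
 * The idea behind these macros: cache32_unroll32() invalidates 32 lines
 * of 32 bytes, i.e. one 1kB chunk.  By aligning the loop code to an even
 * or odd 1kB boundary and walking the I-cache in two passes -- first the
 * odd chunks, then the even ones -- the loops below never issue an
 * Index_Invalidate_I against the chunk they are currently executing
 * from, which appears to be what the TX49 erratum requires.
 */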
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
}
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
}
static void (* r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
static void (* r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}
static void (* r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
static void (* r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
/*
 * This is the former mm flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k___flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, but R10000 and R12000 behave
	 * sanely ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long page;
};
static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long page = fcp_args->page;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pudp = pud_offset(pgdp, page);
	pmdp = pmd_offset(pudp, page);
	ptep = pte_offset(pmdp, page);
	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(page);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(page);
		}
		if (exec)
			r4k_blast_icache_page(page);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
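	/*
	 * Note on the line below: masking the virtual address with
	 * (dcache_size - 1) keeps just the bits that select a cache index,
	 * and adding INDEX_BASE (a CKSEG0 address) turns that into a safe
	 * address for index-type cache ops, which bypass the TLB.  The
	 * *_indexed blast routines then iterate over the ways themselves.
	 */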
	page = INDEX_BASE + (page & (dcache_size - 1));
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(page);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page_indexed(page);
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(page);
	}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.page = page;

	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}
struct flush_icache_range_args {
	unsigned long __user start;
	unsigned long __user end;
};
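/*
 * The protected_* line ops used below go through an exception-table
 * fixup, so a fault on a bad user address is caught instead of taking
 * the machine down; the comment above the sigtramp flush further down
 * relies on the same mechanism.
 */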
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;
	unsigned long addr, aend;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			addr = start & ~(dc_lsize - 1);
			aend = (end - 1) & ~(dc_lsize - 1);

			while (1) {
				/* Hit_Writeback_Inv_D */
				protected_writeback_dcache_line(addr);
				if (addr == aend)
					break;
				addr += dc_lsize;
			}
		}

		if (!cpu_icache_snoops_remote_store) {
			if (end - start > scache_size) {
				r4k_blast_scache();
			} else {
				addr = start & ~(sc_lsize - 1);
				aend = (end - 1) & ~(sc_lsize - 1);

				while (1) {
					/* Hit_Writeback_Inv_SD */
					protected_writeback_scache_line(addr);
					if (addr == aend)
						break;
					addr += sc_lsize;
				}
			}
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		addr = start & ~(ic_lsize - 1);
		aend = (end - 1) & ~(ic_lsize - 1);

		while (1) {
			/* Hit_Invalidate_I */
			protected_flush_icache_line(addr);
			if (addr == aend)
				break;
			addr += ic_lsize;
		}
	}
}
static void r4k_flush_icache_range(unsigned long __user start,
	unsigned long __user end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}
/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  Otoh we at
 * least know the kernel address of the page so we can flush it
 * selectively.
 */
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};
static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got
	 * the choice of either invalidating the entire primary and secondary
	 * caches or invalidating only the secondary cache.  With the subset
	 * enforcement on R4000SC, R4400SC, R10000 and R12000 invalidating
	 * the secondary cache will result in any entries in the primary
	 * caches also getting invalidated, which hopefully is a bit more
	 * economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}
static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no I-cache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}
#ifdef CONFIG_DMA_NONCOHERENT
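/*
 * Helpers for non-coherent DMA: before a buffer is handed to a device
 * its lines must be written back and invalidated (wback_inv); for the
 * device-to-memory direction invalidating is enough (inv).  Note both
 * paths below perform the invalidate with writeback-invalidate line
 * ops, presumably because plain Hit_Invalidate_D is not usable on all
 * the cores this file supports.
 */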
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = cpu_scache_line_size();

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}

		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly.
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = cpu_dcache_line_size();

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_wback_inv(addr, size);
}
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = cpu_scache_line_size();

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}

		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = cpu_dcache_line_size();

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_inv(addr, size);
}

#endif /* CONFIG_DMA_NONCOHERENT */
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
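	/*
	 * The workaround below, as I read it, invalidates the I-cache line
	 * once more through an address loaded into $at, so the line cannot
	 * be refilled with stale instructions between the flush above and
	 * the return to userland; the erratum itself is selected by
	 * MIPS4K_ICACHE_REFILL_WAR in <asm/war.h>.
	 */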
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"1:	.set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}
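/*
 * Only virtually tagged I-caches need this big hammer; on physically
 * tagged ones the address-based flushes above are sufficient, which is
 * presumably why flush_icache_all is a no-op for them.
 */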
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);
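	/*
	 * For each index in the first 4kB: store the (now zero, i.e.
	 * invalid) tag into all four ways, pull a line in with Fill, then
	 * store the zero tags again.  The intent, as far as I can tell, is
	 * to force every way of the I-cache into a known-invalid state
	 * before it is used.
	 */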
	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;
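	/*
	 * For the R4000-style processors below the cache geometry is
	 * encoded in the config register: the IC/DC fields give the size
	 * as a power of two (1 << (12 + IC) bytes for most cores, 1 << (10
	 * + IC) for the small VR41xx caches) and the IB/DB bits select a
	 * 16- or 32-byte line (16 << IB).  The associativity is not
	 * described there, which is why each case hardcodes its own way
	 * count.
	 */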
	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit= ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R10000:
	case CPU_R12000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;
	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe
		 * the I-cache ...
		 */
		config1 = read_c0_config1();
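		/*
		 * Config1 layout (per the MIPS32/MIPS64 PRA): IA/DA in bits
		 * [18:16]/[9:7] give associativity - 1, IS/DS in bits
		 * [24:22]/[15:13] give sets per way as 64 << n, and IL/DL
		 * in bits [21:19]/[12:10] give the line size as 2 << n
		 * bytes, with 0 meaning no cache.  The decode below follows
		 * that layout.
		 */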
		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = ffs(icache_size/c->icache.ways) - 1;

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz= lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size/c->dcache.ways) - 1;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");
	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed so they'd normally suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_SB1:
		break;
	case CPU_24K:
		if (!(read_c0_config7() & (1 << 16)))
		/* fall through */
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}
1057 printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1059 cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
1060 way_string[c->icache.ways], c->icache.linesz);
1062 printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
1063 dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);
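	/*
	 * The sizing trick, in short: prime the caches with valid lines at
	 * power-of-two strides, store an invalid (zero) tag at the base
	 * address, then read tags back at growing power-of-two offsets.
	 * The first offset whose tag reads back as zero aliases to index 0,
	 * i.e. the S-cache wrapped around, so that offset is its size.
	 */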
	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit= 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		sc_present = 0;
	}

	if (!sc_present)
		return;

	if ((c->isa_level == MIPS_CPU_ISA_M32 ||
	     c->isa_level == MIPS_CPU_ISA_M64) &&
	    !(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
		panic("Dunno how to handle MIPS32 / MIPS64 second level cache");

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_SUBSET_CACHES;
}
static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	}
}
void __init ld_mmu_r4xx0(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler(0x100, &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();
	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
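	/*
	 * The *_setup() calls above latch the blast routine matching the
	 * probed line size into a function pointer once at boot, so the
	 * hot flush paths dispatch through a single indirect call instead
	 * of re-testing the line size on every invocation.
	 */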
	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	shm_align_mask = max_t(unsigned long,
			       c->dcache.sets * c->dcache.linesz - 1,
			       PAGE_SIZE - 1);

	flush_cache_all = r4k_flush_cache_all;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_icache_page = r4k_flush_icache_page;
	flush_cache_range = r4k_flush_cache_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
	_dma_cache_wback = r4k_dma_cache_wback_inv;
	_dma_cache_inv = r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}