/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */

#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
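
/*
 * For illustration (a hypothetical call, not part of this header): an
 * indexed flush of the first line of way 0 is issued against INDEX_BASE
 * rather than an arbitrary address, so the implementation-defined index
 * bits come from a known-good KSEG0 / CKSEG0 address:
 *
 *	flush_dcache_line_indexed(INDEX_BASE + 0);
 */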

#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set	mips3					\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "m" (*(unsigned char *)(addr)))

static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_I, addr);
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips3			\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

/*
 * The next few are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	protected_cache_op(Hit_Invalidate_I, addr);
}
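
/*
 * Usage sketch (hypothetical caller, not part of this header): after
 * writing a signal trampoline to user memory, a caller would write the
 * store back from the dcache and then kill the stale icache line; the
 * __ex_table entry lets a fault on an unmapped address be skipped:
 *
 *	protected_writeback_dcache_line(tramp_addr);
 *	protected_flush_icache_line(tramp_addr);
 */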

/*
 * R10000 / R12000 hazard - these processors don't support the
 * Hit_Writeback_D cacheop, so we use Hit_Writeback_Inv_D, which is
 * supported by all R4000-style caches.  At worst one cacheline gets
 * unnecessarily invalidated here, so the penalty is minor.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific.
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}
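
/*
 * Each cacheNN_unroll32 macro below issues 32 cache ops per expansion,
 * one per line of a cache with NN-byte lines, so a single expansion
 * covers 32 * NN bytes: 512 bytes for 16-byte lines up to 4 Kbytes for
 * 128-byte lines.
 */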

#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize(void)			\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
	                       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws,indexop);	\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page(unsigned long page)	\
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache##lsize##_unroll32(start,hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
	                       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws,indexop);	\
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
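
/*
 * Usage sketch (hypothetical caller, not part of this header): a flush
 * routine picks the variant matching the probed line size, e.g. for a
 * 32-byte-line primary dcache:
 *
 *	if (cpu_dcache_line_size() == 32)
 *		blast_dcache32_page(page_addr);
 */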

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
}

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
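
/*
 * Usage sketch (hypothetical caller, not part of this header): writing a
 * DMA buffer back to memory before a device reads it, using the
 * unprotected variant on a known-mapped kernel address:
 *
 *	blast_dcache_range(addr, addr + size);
 */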

#endif /* _ASM_R4KCACHE_H */