linux-2.6-omap-h63xx.git: include/asm-mips/hazards.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004 Ralf Baechle
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/config.h>

#ifdef __ASSEMBLY__

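/*
 * SSNOP and EHB are encoded as shifts of register $0 (sll $0, $0, 1 and
 * sll $0, $0, 3) so they degrade to an ordinary nop on processors that
 * do not implement them.
 */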
	.macro	_ssnop
	sll	$0, $0, 1
	.endm

	.macro	_ehb
	sll	$0, $0, 3
	.endm

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#ifdef CONFIG_CPU_RM9000

	.macro	mtc0_tlbw_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

	.macro	tlbw_eret_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

#else

/*
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single cycle
 * hazard so this is a nice trick to get optimal code for a range of
 * processors.  The branch targets the instruction after its own delay slot,
 * so no instruction is skipped; the taken branch merely stalls the pipeline
 * long enough to cover the hazard.
 */
	.macro	mtc0_tlbw_hazard
	b	. + 8
	.endm

	.macro	tlbw_eret_hazard
	.endm
#endif
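
/*
 * Usage sketch (editor's illustration, not part of the original file),
 * modeled on the kernel's TLB handling: the hazard macros separate the
 * CP0 writes from the TLB write, and the TLB write from the eret.
 *
 *	mtc0	k0, CP0_ENTRYHI
 *	mtc0	k1, CP0_ENTRYLO0
 *	mtc0_tlbw_hazard
 *	tlbwi
 *	tlbw_eret_hazard
 *	eret
 */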

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */

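/*
 * Sketch (editor's illustration, not part of the original file): on a
 * MIPS32R2 CPU such as the 24K, an ehb between the mtc0 and the mfc0
 * clears the execution hazard.
 *
 *	mtc0	t0, CP0_STATUS
 *	_ehb
 *	mfc0	t1, CP0_STATUS
 */
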
#ifdef CONFIG_CPU_MIPSR2
/*
 * Use the _ehb macro rather than the ehb mnemonic unless explicit
 * assembler support for MIPSR2 is enabled.
 */

#define irq_enable_hazard						\
	_ehb

#define irq_disable_hazard						\
	_ehb

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
      defined(CONFIG_CPU_SB1)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a
 * no-brainer.
 */

#define irq_enable_hazard

#define irq_disable_hazard

#else

/*
 * Classic MIPS needs 1 - 3 nops or ssnops
 */
#define irq_enable_hazard
#define irq_disable_hazard						\
	_ssnop; _ssnop; _ssnop

#endif
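
/*
 * Usage sketch (editor's illustration, not part of the original file),
 * modeled on the interrupt-disable sequences in the kernel's assembler
 * code: the hazard macro follows the Status update so that no instruction
 * runs with stale interrupt state.
 *
 *	mfc0	t0, CP0_STATUS
 *	ori	t0, 1
 *	xori	t0, 1
 *	mtc0	t0, CP0_STATUS
 *	irq_disable_hazard
 */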

#else /* __ASSEMBLY__ */

__asm__(
	"	.macro	_ssnop					\n\t"
	"	sll	$0, $0, 1				\n\t"
	"	.endm						\n\t"
	"							\n\t"
	"	.macro	_ehb					\n\t"
	"	sll	$0, $0, 3				\n\t"
	"	.endm						\n\t");

#ifdef CONFIG_CPU_RM9000

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */

#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
		".set\tmips32\n\t"					\
		"_ssnop; _ssnop; _ssnop; _ssnop\n\t"			\
		".set\tmips0")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
		".set\tmips32\n\t"					\
		"_ssnop; _ssnop; _ssnop; _ssnop\n\t"			\
		".set\tmips0")

#define back_to_back_c0_hazard()	do { } while (0)

#else

/*
 * Overkill warning ...
 */
#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
		".set noreorder\n\t"					\
		"nop; nop; nop; nop; nop; nop;\n\t"			\
		".set reorder\n\t")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
		".set noreorder\n\t"					\
		"nop; nop; nop; nop; nop; nop;\n\t"			\
		".set reorder\n\t")

#endif
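
/*
 * Usage sketch (editor's illustration, not part of the original file),
 * modeled on the pattern in arch/mips/mm/tlb-r4k.c: the hazard macros
 * bracket the actual TLB write.
 *
 *	write_c0_entryhi(address);
 *	write_c0_entrylo0(pte0);
 *	write_c0_entrylo1(pte1);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 */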

/*
 * Interrupt enable/disable hazards
 * Some processors have hazards when modifying the Status register to
 * change the interrupt state.
 */

#ifdef CONFIG_CPU_MIPSR2

__asm__(
	"	.macro\tirq_enable_hazard			\n\t"
	"	_ehb						\n\t"
	"	.endm						\n\t"
	"							\n\t"
	"	.macro\tirq_disable_hazard			\n\t"
	"	_ehb						\n\t"
	"	.endm						\n\t"
	"							\n\t"
	"	.macro\tback_to_back_c0_hazard			\n\t"
	"	_ehb						\n\t"
	"	.endm");

#define irq_enable_hazard()						\
	__asm__ __volatile__(						\
	"irq_enable_hazard")

#define irq_disable_hazard()						\
	__asm__ __volatile__(						\
	"irq_disable_hazard")

#define back_to_back_c0_hazard()					\
	__asm__ __volatile__(						\
	"back_to_back_c0_hazard")

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
      defined(CONFIG_CPU_SB1)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a
 * no-brainer.
 */

__asm__(
	"	.macro\tirq_enable_hazard			\n\t"
	"	.endm						\n\t"
	"							\n\t"
	"	.macro\tirq_disable_hazard			\n\t"
	"	.endm");

#define irq_enable_hazard()	do { } while (0)
#define irq_disable_hazard()	do { } while (0)

#define back_to_back_c0_hazard()	do { } while (0)

#else

/*
 * Default for classic MIPS processors.  Assume worst case hazards but don't
 * care about the irq_enable_hazard - sooner or later the hardware will
 * enable interrupts and we don't care when exactly.
 */

__asm__(
	"	#						\n\t"
	"	# There is a hazard but we do not care		\n\t"
	"	#						\n\t"
	"	.macro\tirq_enable_hazard			\n\t"
	"	.endm						\n\t"
	"							\n\t"
	"	.macro\tirq_disable_hazard			\n\t"
	"	_ssnop; _ssnop; _ssnop				\n\t"
	"	.endm");

#define irq_enable_hazard()	do { } while (0)
#define irq_disable_hazard()						\
	__asm__ __volatile__(						\
	"irq_disable_hazard")

#define back_to_back_c0_hazard()					\
	__asm__ __volatile__(						\
	"	.set noreorder				\n"		\
	"	nop; nop; nop				\n"		\
	"	.set reorder				\n")

#endif
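
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * code that clears the interrupt enable bit must not assume interrupts
 * are really off until the hazard macro has executed.
 *
 *	write_c0_status(read_c0_status() & ~ST0_IE);
 *	irq_disable_hazard();
 */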

#ifdef CONFIG_CPU_MIPSR2
/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code ...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#else
#define instruction_hazard() do { } while (0)
#endif
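
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * after new instructions have been written, for example when setting up
 * a signal trampoline, instruction_hazard() ensures the pipeline holds
 * no stale instructions.
 *
 *	flush_cache_sigtramp(addr);
 *	instruction_hazard();
 */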

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */