/* include/asm-x86/irqflags.h */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */

/* Read the current value of the EFLAGS register. */
static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=g" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}

/* Write @flags back to the EFLAGS register. */
static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     : "g" (flags)
                     : "memory", "cc");
}

/* Disable interrupts by clearing the IF flag. */
static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

/* Enable interrupts by setting the IF flag. */
static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

/* Enable interrupts and halt in one back-to-back sequence. */
static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}

/* Halt without touching the interrupt flag. */
static inline void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__

static inline unsigned long __raw_local_save_flags(void)
{
        return native_save_fl();
}

static inline void raw_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline void raw_local_irq_disable(void)
{
        native_irq_disable();
}

static inline void raw_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; "sti" only takes effect after the following
 * instruction, so no interrupt can be delivered between the "sti" and
 * the "hlt", and a pending wakeup interrupt cannot be lost:
 */
static inline void raw_safe_halt(void)
{
        native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc: save the current flags, then disable interrupts.
 */
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long flags = __raw_local_save_flags();

        raw_local_irq_disable();

        return flags;
}
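
/*
 * Illustrative sketch (not part of the original header): the usual
 * pairing for a short critical section, e.g. inside a spinlock
 * implementation.  "do_something_atomic" is a made-up placeholder.
 *
 *      unsigned long flags;
 *
 *      flags = __raw_local_irq_save();    disable IRQs, remember old IF
 *      do_something_atomic();
 *      raw_local_irq_restore(flags);      re-enable only if they were on
 */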
#else

#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#define SWAPGS_UNSAFE_STACK     swapgs
#define INTERRUPT_RETURN        iretq
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32             \
        swapgs;                                 \
        sti;                                    \
        sysexit

#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

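/*
 * Illustrative sketch (not part of the original header): entry code
 * invokes the macros above with a clobber annotation that only the
 * paravirt variants care about, e.g. in a hypothetical .S fragment:
 *
 *      DISABLE_INTERRUPTS(CLBR_NONE)
 *      ...
 *      ENABLE_INTERRUPTS(CLBR_NONE)
 *      ...
 *      INTERRUPT_RETURN
 */
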
#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags)                             \
        do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags)                               \
        do { (flags) = __raw_local_irq_save(); } while (0)

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int raw_irqs_disabled(void)
{
        unsigned long flags = __raw_local_save_flags();

        return raw_irqs_disabled_flags(flags);
}
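
/*
 * Illustrative sketch (not part of the original header): a typical
 * debugging assertion built on the helpers above, e.g. to verify that
 * a caller really did disable interrupts before touching per-cpu state:
 *
 *      WARN_ON(!raw_irqs_disabled());
 *
 * or, when only a saved flags word is available:
 *
 *      WARN_ON(!raw_irqs_disabled_flags(flags));
 */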

/*
 * Makes the traced hardirq state match the actual machine state.
 *
 * Should be a rarely used function, only in places where it's
 * otherwise impossible to know the irq state, like in traps.
 */
static inline void trace_hardirqs_fixup_flags(unsigned long flags)
{
        if (raw_irqs_disabled_flags(flags))
                trace_hardirqs_off();
        else
                trace_hardirqs_on();
}

static inline void trace_hardirqs_fixup(void)
{
        unsigned long flags = __raw_local_save_flags();

        trace_hardirqs_fixup_flags(flags);
}
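
/*
 * Illustrative sketch (not part of the original header): an exception
 * handler that cannot know whether it interrupted an irqs-on or
 * irqs-off region can resynchronize lockdep's view first thing.
 * "do_hypothetical_trap" and "handle_trap" are made-up names.
 *
 *      void do_hypothetical_trap(struct pt_regs *regs, long error_code)
 *      {
 *              trace_hardirqs_fixup();
 *              handle_trap(regs, error_code);
 *      }
 */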

#else

#ifdef CONFIG_X86_64
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define ARCH_TRACE_IRQS_ON              call trace_hardirqs_on_thunk
#define ARCH_TRACE_IRQS_OFF             call trace_hardirqs_off_thunk
#define ARCH_LOCKDEP_SYS_EXIT           call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON; \
        sti; \
        SAVE_REST; \
        LOCKDEP_SYS_EXIT; \
        RESTORE_REST; \
        cli; \
        TRACE_IRQS_OFF;

#else
#define ARCH_TRACE_IRQS_ON                      \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call trace_hardirqs_on;                 \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_TRACE_IRQS_OFF                     \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call trace_hardirqs_off;                \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT                   \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         ARCH_TRACE_IRQS_ON
#  define TRACE_IRQS_OFF        ARCH_TRACE_IRQS_OFF
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT      ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ  ARCH_LOCKDEP_SYS_EXIT_IRQ
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif

#endif /* __ASSEMBLY__ */
#endif /* _X86_IRQFLAGS_H_ */