#ifndef ASM_X86__SPINLOCK_H
#define ASM_X86__SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
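
/*
 * Illustrative usage only (callers go through the generic
 * <linux/spinlock.h> wrappers, which eventually reach the __raw_*
 * operations defined here; my_lock is a hypothetical example):
 *
 *      static DEFINE_SPINLOCK(my_lock);
 *      unsigned long flags;
 *
 *      spin_lock(&my_lock);                     leaves IRQs alone
 *      spin_unlock(&my_lock);
 *
 *      spin_lock_irqsave(&my_lock, flags);      disables local IRQs
 *      spin_unlock_irqrestore(&my_lock, flags);
 */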

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
        (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
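
/*
 * Illustrative sketch only -- a hypothetical helper, not part of the
 * kernel API: the same ticket protocol expressed in portable C with
 * GCC __atomic builtins, for the NR_CPUS < 256 layout (head in the
 * low byte, tail in the high byte).  The real implementations below
 * do this with a single xadd in hand-written asm.
 */
static inline void __ticket_spin_lock_sketch(unsigned short *slock)
{
        /* Atomically take a ticket: bump the tail and fetch the old
         * head/tail pair in one operation, like the xadd below. */
        unsigned short old = __atomic_fetch_add(slock, 0x0100,
                                                __ATOMIC_ACQUIRE);
        unsigned char ticket = old >> 8;        /* our place in the queue */

        /* Spin until the head catches up with our ticket. */
        while ((unsigned char)__atomic_load_n(slock, __ATOMIC_ACQUIRE)
               != ticket)
                ;       /* cpu_relax() in real code */
}
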
#if (NR_CPUS < 256)
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return (((tmp >> 8) - tmp) & 0xff) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
        short inc = 0x0100;

        asm volatile (
                LOCK_PREFIX "xaddw %w0, %1\n"
                "1:\t"
                "cmpb %h0, %b0\n\t"
                "je 2f\n\t"
                "rep ; nop\n\t"
                "movb %1, %b0\n\t"
                /* don't need lfence here, because loads are in-order */
                "jmp 1b\n"
                "2:"
                : "+Q" (inc), "+m" (lock->slock)
                :
                : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
        int tmp;
        short new;

        asm volatile("movw %2,%w0\n\t"          /* load head/tail pair */
                     "cmpb %h0,%b0\n\t"         /* head == tail, i.e. free? */
                     "jne 1f\n\t"               /* contended: fail, ZF clear */
                     "movw %w0,%w1\n\t"
                     "incb %h1\n\t"             /* bump the tail: take a ticket */
                     LOCK_PREFIX "cmpxchgw %w1,%2\n\t" /* publish if unchanged */
                     "1:"
                     "sete %b1\n\t"             /* ZF tracks success on both paths */
                     "movzbl %b1,%0\n\t"
                     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");

        return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
                     : "+m" (lock->slock)
                     :
                     : "memory", "cc");
}
#else
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return (((tmp >> 16) - tmp) & 0xffff) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
        int inc = 0x00010000;
        int tmp;

        asm volatile(LOCK_PREFIX "xaddl %0, %1\n"       /* take a ticket */
                     "movzwl %w0, %2\n\t"               /* tmp = head */
                     "shrl $16, %0\n\t"                 /* inc = our ticket */
                     "1:\t"
                     "cmpl %0, %2\n\t"
                     "je 2f\n\t"
                     "rep ; nop\n\t"
                     "movzwl %1, %2\n\t"                /* reload the head */
                     /* don't need lfence here, because loads are in-order */
                     "jmp 1b\n"
                     "2:"
                     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
                     :
                     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
        int tmp;
        int new;

        asm volatile("movl %2,%0\n\t"
                     "movl %0,%1\n\t"
                     "roll $16, %0\n\t"         /* swap head and tail halves */
                     "cmpl %0,%1\n\t"           /* equal iff head == tail */
                     "jne 1f\n\t"               /* contended: fail, ZF clear */
                     "addl $0x00010000, %1\n\t" /* bump the tail: take a ticket */
                     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
                     "1:"
                     "sete %b1\n\t"             /* ZF tracks success on both paths */
                     "movzbl %b1,%0\n\t"
                     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");

        return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
                     : "+m" (lock->slock)
                     :
                     : "memory", "cc");
}
#endif

#ifdef CONFIG_PARAVIRT
/*
 * Define a virtualization-friendly, old-style byte lock, for use in
 * pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure.  It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
struct __byte_spinlock {
        s8 lock;
        s8 spinners;
};
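
/*
 * Illustrative sketch only -- a hypothetical helper, not part of the
 * kernel API: the byte-lock protocol above in portable C with GCC
 * __atomic builtins.  xchg both tests and sets the lock byte, so a
 * zero-initialized structure is already unlocked; the spinners field
 * only feeds __byte_spin_is_contended.
 */
static inline void __byte_spin_lock_sketch(struct __byte_spinlock *bl)
{
        while (__atomic_exchange_n(&bl->lock, 1, __ATOMIC_ACQUIRE) != 0) {
                /* Lock was held: count ourselves as a spinner, wait for
                 * the holder to drop it, then retry the xchg. */
                __atomic_fetch_add(&bl->spinners, 1, __ATOMIC_RELAXED);
                while (__atomic_load_n(&bl->lock, __ATOMIC_RELAXED))
                        ;       /* cpu_relax() in real code */
                __atomic_fetch_sub(&bl->spinners, 1, __ATOMIC_RELAXED);
        }
}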

static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        return bl->lock != 0;
}

static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        return bl->spinners != 0;
}

static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        s8 val = 1;

        asm("1: xchgb %1, %0\n"                 /* try to take the lock */
            "   test %1,%1\n"
            "   jz 3f\n"                        /* got it */
            "   " LOCK_PREFIX "incb %2\n"       /* announce we are spinning */
            "2: rep;nop\n"
            "   cmpb $1, %0\n"
            "   je 2b\n"                        /* wait until it looks free */
            "   " LOCK_PREFIX "decb %2\n"
            "   jmp 1b\n"                       /* and try the xchg again */
            "3:"
            : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}

static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        u8 old = 1;

        asm("xchgb %1,%0"
            : "+m" (bl->lock), "+q" (old) : : "memory");

        return old == 0;
}

static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        smp_wmb();
        bl->lock = 0;
}
#else  /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
        return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
        return __ticket_spin_is_contended(lock);
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        __ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        __ticket_spin_unlock(lock);
}

static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
                                                  unsigned long flags)
{
        __raw_spin_lock(lock);
}

#endif  /* CONFIG_PARAVIRT */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
        while (__raw_spin_is_locked(lock))
                cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
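
/*
 * Illustrative sketch only -- a hypothetical helper, not kernel API:
 * the counter protocol in portable C.  The lock word starts at
 * RW_LOCK_BIAS; each reader subtracts 1 and a writer subtracts the
 * whole bias, so the word equals RW_LOCK_BIAS when free, stays
 * positive while only readers hold it, and reaches zero (or goes
 * negative) once a writer is involved.
 */
static inline int __rw_trylock_sketch(int *count, int is_writer)
{
        int delta = is_writer ? RW_LOCK_BIAS : 1;
        /* Speculatively subtract, mirroring the subl in the lock paths. */
        int new = __atomic_sub_fetch(count, delta, __ATOMIC_ACQUIRE);

        if (is_writer ? (new == 0) : (new >= 0))
                return 1;                       /* got the lock */
        __atomic_add_fetch(count, delta, __ATOMIC_RELEASE);
        return 0;                               /* undo and fail */
}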

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
        return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
        return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
                     "jns 1f\n"
                     "call __read_lock_failed\n\t"      /* out-of-line slowpath */
                     "1:\n"
                     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
                     "jz 1f\n"
                     "call __write_lock_failed\n\t"     /* out-of-line slowpath */
                     "1:\n"
                     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        atomic_dec(count);
        if (atomic_read(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);
        return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX "addl %1, %0"
                     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)   cpu_relax()
#define _raw_read_relax(lock)   cpu_relax()
#define _raw_write_relax(lock)  cpu_relax()

#endif /* ASM_X86__SPINLOCK_H */