#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */
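
/*
 * The simplest use is a shared event counter.  A minimal sketch (the
 * packet_count counter and the count_packet()/packets_seen() helpers
 * are hypothetical, for illustration only):
 *
 *	static atomic_t packet_count = ATOMIC_INIT(0);
 *
 *	static void count_packet(void)
 *	{
 *		atomic_inc(&packet_count);
 *	}
 *
 *	static int packets_seen(void)
 *	{
 *		return atomic_read(&packet_count);
 *	}
 */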


/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i)          ( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)        ( (atomic64_t) { (i) } )

#define atomic_read(v)          ((v)->counter + 0)
#define atomic64_read(v)        ((v)->counter + 0)
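
/*
 * The "+ 0" above turns the read into an rvalue, so the result of
 * atomic_read()/atomic64_read() cannot accidentally be used as an
 * assignable lvalue.
 */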

#define atomic_set(v,i)         ((v)->counter = (i))
#define atomic64_set(v,i)       ((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}
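
/*
 * A rough C-level sketch of the ll/sc sequence above, for readers not
 * fluent in Alpha assembly.  store_conditional() is a hypothetical
 * stand-in for stl_c, which fails (and clears its register) if the lock
 * flag set by ldl_l was lost to another CPU or an interrupt; the
 * out-of-line "2: br 1b" in .subsection 2 is just the rarely-taken
 * retry path kept off the straight-line code, as the comment above
 * explains:
 *
 *	do {
 *		temp = v->counter;	(ldl_l)
 *		temp += i;		(addl)
 *	} while (!store_conditional(&v->counter, temp));	(stl_c + beq)
 */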

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       addq %0,%2,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       subq %0,%2,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the resulting value.  These variants are
 * fully ordered: smp_mb() is issued both before and after the ll/sc
 * sequence.  The new value has to be built in a separate register
 * (result), since stl_c/stq_c overwrites its source register with the
 * success flag.
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%3,%2\n"
        "       addl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       addq %0,%3,%2\n"
        "       addq %0,%3,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%3,%2\n"
        "       subl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       subq %0,%3,%2\n"
        "       subq %0,%3,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
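
/*
 * atomic_cmpxchg() stores new into the counter only if it currently
 * holds old, and in either case returns the value the counter held
 * before the operation; atomic_add_unless() below is the canonical
 * compare-and-retry user of this primitive.
 */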

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)                              \
({                                                              \
        __typeof__((v)->counter) c, old;                        \
        c = atomic_read(v);                                     \
        for (;;) {                                              \
                if (unlikely(c == (u)))                         \
                        break;                                  \
                old = atomic_cmpxchg((v), c, c + (a));          \
                if (likely(old == c))                           \
                        break;                                  \
                c = old;                                        \
        }                                                       \
        c != (u);                                               \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
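
/*
 * A common use of atomic_inc_not_zero() is taking a reference to an
 * object found by a lock-free lookup only while it is still live.
 * A minimal sketch (obj, lookup() and obj->refcnt are hypothetical):
 *
 *	obj = lookup(key);
 *	if (obj && !atomic_inc_not_zero(&obj->refcnt))
 *		obj = NULL;	(refcount already zero, object is going away)
 */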

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic64_add_unless(v, a, u)                            \
({                                                              \
        __typeof__((v)->counter) c, old;                        \
        c = atomic64_read(v);                                   \
        for (;;) {                                              \
                if (unlikely(c == (u)))                         \
                        break;                                  \
                old = atomic64_cmpxchg((v), c, c + (a));        \
                if (likely(old == c))                           \
                        break;                                  \
                c = old;                                        \
        }                                                       \
        c != (u);                                               \
})
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
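
/*
 * atomic_dec_and_test() is the usual way to drop a reference: it returns
 * true only for the caller that takes the counter to zero.  A minimal
 * sketch (struct obj, obj->refcnt and free_obj() are hypothetical):
 *
 *	static void put_obj(struct obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			free_obj(obj);
 *	}
 */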

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

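/*
 * The plain atomic_inc()/atomic_dec() above provide no memory ordering
 * on Alpha, so callers that need ordering around them use these hooks,
 * which must therefore be full barriers here.
 */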
#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#include <asm-generic/atomic.h>
#endif /* _ALPHA_ATOMIC_H */