/*
 * Source: linux-2.6-omap-h63xx.git — include/asm-ia64/intrinsics.h
 * At commit: "[IA64] pvops: preparation: introduce ia64_set_rr0_to_rr4()
 * to make kernel paravirtualization-friendly."
 */
1 #ifndef _ASM_IA64_INTRINSICS_H
2 #define _ASM_IA64_INTRINSICS_H
3
4 /*
5  * Compiler-dependent intrinsics.
6  *
7  * Copyright (C) 2002-2003 Hewlett-Packard Co
8  *      David Mosberger-Tang <davidm@hpl.hp.com>
9  */
10
11 #ifndef __ASSEMBLY__
12
13 /* include compiler specific intrinsics */
14 #include <asm/ia64regs.h>
15 #ifdef __INTEL_COMPILER
16 # include <asm/intel_intrin.h>
17 #else
18 # include <asm/gcc_intrin.h>
19 #endif
20
/*
 * Set region registers rr0-rr4 in one shot.  The first argument of
 * ia64_set_rr() is a virtual address whose top three bits (63:61)
 * select the region register, hence the 0x0/0x2/0x4/0x6/0x8 << 60
 * constants for regions 0 through 4.  Bundling the five writes behind
 * a single macro gives a paravirtualized kernel one hook point to
 * override.
 */
#define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4)       \
do {                                                            \
        ia64_set_rr(0x0000000000000000UL, (val0));              \
        ia64_set_rr(0x2000000000000000UL, (val1));              \
        ia64_set_rr(0x4000000000000000UL, (val2));              \
        ia64_set_rr(0x6000000000000000UL, (val3));              \
        ia64_set_rr(0x8000000000000000UL, (val4));              \
} while (0)
29
30 /*
31  * Force an unresolved reference if someone tries to use
32  * ia64_fetch_and_add() with a bad value.
33  */
34 extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
35 extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
36
/*
 * IA64_FETCHADD - size-dispatched atomic fetch-and-add building block.
 *
 * @tmp: lvalue that receives the value of *@v *before* the add
 * @v:   pointer to the 4- or 8-byte object to add to
 * @n:   increment (must be one the fetchadd instruction encodes)
 * @sz:  sizeof(*@v); only 4 and 8 are supported
 * @sem: ordering semantics, "acq" or "rel", token-pasted into the
 *       intrinsic name (ia64_fetchadd4_acq etc.)
 *
 * Any other size resolves to a call to the intentionally undefined
 * __bad_size_for_ia64_fetch_and_add(), turning the mistake into a
 * link-time error.  All macro arguments are parenthesized so the
 * pointer casts bind correctly when callers pass compound
 * expressions (e.g. "base + off").
 */
#define IA64_FETCHADD(tmp,v,n,sz,sem)                                           \
({                                                                              \
        switch (sz) {                                                           \
              case 4:                                                           \
                (tmp) = ia64_fetchadd4_##sem((unsigned int *) (v), (n));        \
                break;                                                          \
                                                                                \
              case 8:                                                           \
                (tmp) = ia64_fetchadd8_##sem((unsigned long *) (v), (n));       \
                break;                                                          \
                                                                                \
              default:                                                          \
                __bad_size_for_ia64_fetch_and_add();                            \
        }                                                                       \
})
52
/*
 * ia64_fetchadd - atomically add @i to *@v and return the OLD value,
 * cast back to the pointed-to type.
 *
 * @i:   increment; must be a compile-time constant in
 *       {-16,-8,-4,-1,1,4,8,16}, the only immediates the ia64
 *       fetchadd instruction can encode.  Anything else resolves to
 *       the undefined __bad_increment_for_ia64_fetch_and_add() and
 *       fails at link time.
 * @v:   pointer to a 4- or 8-byte object (size checked by
 *       IA64_FETCHADD).
 * @sem: ordering semantics, "acq" or "rel".
 *
 * The if/else ladder (rather than a switch) lets each arm pass a
 * literal constant to IA64_FETCHADD, as the original comment below
 * explains.
 */
#define ia64_fetchadd(i,v,sem)                                                          \
({                                                                                      \
        __u64 _tmp;                                                                     \
        volatile __typeof__(*(v)) *_v = (v);                                            \
        /* Can't use a switch () here: gcc isn't always smart enough for that... */     \
        if ((i) == -16)                                                                 \
                IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);                        \
        else if ((i) == -8)                                                             \
                IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);                         \
        else if ((i) == -4)                                                             \
                IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);                         \
        else if ((i) == -1)                                                             \
                IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);                         \
        else if ((i) == 1)                                                              \
                IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);                          \
        else if ((i) == 4)                                                              \
                IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);                          \
        else if ((i) == 8)                                                              \
                IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);                          \
        else if ((i) == 16)                                                             \
                IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);                         \
        else                                                                            \
                _tmp = __bad_increment_for_ia64_fetch_and_add();                        \
        (__typeof__(*(v))) (_tmp);      /* return old value */                          \
})
78
/*
 * Unlike ia64_fetchadd(), this returns the NEW value, reconstructed by
 * adding the increment to the returned old value.  Release semantics.
 */
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
80
81 /*
82  * This function doesn't exist, so you'll get a linker error if
83  * something tries to do an invalid xchg().
84  */
85 extern void ia64_xchg_called_with_bad_pointer (void);
86
/*
 * __xchg - atomically exchange the object at @ptr with @x.
 *
 * @x:    new value (converted through unsigned long by the xchg()
 *        wrapper below)
 * @ptr:  pointer to a 1-, 2-, 4-, or 8-byte object
 * @size: sizeof(*@ptr), selecting the matching ia64_xchgN intrinsic
 *
 * Evaluates to the previous contents of *@ptr, zero-extended into an
 * unsigned long.  An unsupported size resolves to the intentionally
 * undefined ia64_xchg_called_with_bad_pointer(), so misuse becomes a
 * link-time error.  @ptr and @x are parenthesized so the casts bind
 * correctly when callers pass compound expressions.
 */
#define __xchg(x,ptr,size)                                              \
({                                                                      \
        unsigned long __xchg_result;                                    \
                                                                        \
        switch (size) {                                                 \
              case 1:                                                   \
                __xchg_result = ia64_xchg1((__u8 *) (ptr), (x));        \
                break;                                                  \
                                                                        \
              case 2:                                                   \
                __xchg_result = ia64_xchg2((__u16 *) (ptr), (x));       \
                break;                                                  \
                                                                        \
              case 4:                                                   \
                __xchg_result = ia64_xchg4((__u32 *) (ptr), (x));       \
                break;                                                  \
                                                                        \
              case 8:                                                   \
                __xchg_result = ia64_xchg8((__u64 *) (ptr), (x));       \
                break;                                                  \
              default:                                                  \
                ia64_xchg_called_with_bad_pointer();                    \
        }                                                               \
        __xchg_result;                                                  \
})
112
/*
 * xchg - type-preserving front end for __xchg(): the result is cast
 * back to the pointed-to type of @ptr.
 */
#define xchg(ptr,x)                                                          \
  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
115
116 /*
117  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
118  * store NEW in MEM.  Return the initial value in MEM.  Success is
119  * indicated by comparing RETURN with OLD.
120  */
121
122 #define __HAVE_ARCH_CMPXCHG 1
123
124 /*
125  * This function doesn't exist, so you'll get a linker error
126  * if something tries to do an invalid cmpxchg().
127  */
128 extern long ia64_cmpxchg_called_with_bad_pointer (void);
129
/*
 * ia64_cmpxchg - size-dispatched atomic compare-and-exchange.
 *
 * @sem:  ordering semantics, "acq" or "rel", token-pasted into the
 *        intrinsic name (ia64_cmpxchg4_acq etc.)
 * @ptr:  pointer to a 1-, 2-, 4-, or 8-byte object
 * @old:  expected value; the store happens only if *@ptr == @old
 * @new:  replacement value
 * @size: sizeof(*@ptr)
 *
 * Evaluates to the value found in *@ptr before the operation, cast to
 * the type of @old; success is detected by comparing the result with
 * @old.  The first switch narrows @old to the operand width so the
 * hardware comparison sees exactly the bit pattern stored in memory.
 * An unsupported size resolves to the intentionally undefined
 * ia64_cmpxchg_called_with_bad_pointer(), producing a link-time
 * error.  @ptr and @new are parenthesized so the casts and argument
 * positions bind correctly for compound caller expressions.
 */
#define ia64_cmpxchg(sem,ptr,old,new,size)                                              \
({                                                                                      \
        __u64 _o_, _r_;                                                                 \
                                                                                        \
        switch (size) {                                                                 \
              case 1: _o_ = (__u8 ) (long) (old); break;                                \
              case 2: _o_ = (__u16) (long) (old); break;                                \
              case 4: _o_ = (__u32) (long) (old); break;                                \
              case 8: _o_ = (__u64) (long) (old); break;                                \
              default: _o_ = 0; break; /* bad size: link error raised below */          \
        }                                                                               \
        switch (size) {                                                                 \
              case 1:                                                                   \
                _r_ = ia64_cmpxchg1_##sem((__u8 *) (ptr), (new), _o_);                  \
                break;                                                                  \
                                                                                        \
              case 2:                                                                   \
                _r_ = ia64_cmpxchg2_##sem((__u16 *) (ptr), (new), _o_);                 \
                break;                                                                  \
                                                                                        \
              case 4:                                                                   \
                _r_ = ia64_cmpxchg4_##sem((__u32 *) (ptr), (new), _o_);                 \
                break;                                                                  \
                                                                                        \
              case 8:                                                                   \
                _r_ = ia64_cmpxchg8_##sem((__u64 *) (ptr), (new), _o_);                 \
                break;                                                                  \
                                                                                        \
              default:                                                                  \
                _r_ = ia64_cmpxchg_called_with_bad_pointer();                           \
                break;                                                                  \
        }                                                                               \
        (__typeof__(old)) _r_;                                                          \
})
164
/* Compare-and-exchange with acquire semantics: later memory accesses
 * cannot be reordered before it. */
#define cmpxchg_acq(ptr, o, n)  \
        ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
/* Compare-and-exchange with release semantics: earlier memory accesses
 * cannot be reordered after it. */
#define cmpxchg_rel(ptr, o, n)  \
        ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))

/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n)      cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n)    cmpxchg_acq((ptr), (o), (n))

/* The "local" (single-CPU) variants are not separately optimized on
 * ia64; they simply alias the fully atomic versions. */
#define cmpxchg_local           cmpxchg
#define cmpxchg64_local         cmpxchg64
176
/*
 * CMPXCHG_BUGCHECK - debugging aid for cmpxchg retry loops.
 *
 * Place CMPXCHG_BUGCHECK_DECL in the enclosing scope and invoke
 * CMPXCHG_BUGCHECK(v) on every loop iteration: if the loop spins more
 * than 128 times (a likely livelock), the current instruction pointer
 * and the address of the contended word are reported via printk and
 * the loop is broken out of.  printk is declared locally to avoid
 * pulling kernel headers into this low-level file.  Both macros
 * compile away entirely unless CONFIG_IA64_DEBUG_CMPXCHG is set.
 */
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL  int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v)                                                    \
  do {                                                                          \
        if (_cmpxchg_bugcheck_count-- <= 0) {                                   \
                void *ip;                                                       \
                extern int printk(const char *fmt, ...);                        \
                ip = (void *) ia64_getreg(_IA64_REG_IP);                        \
                printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));  \
                break;                                                          \
        }                                                                       \
  } while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
193
194 #endif
195 #endif /* _ASM_IA64_INTRINSICS_H */