[linux-2.6-omap-h63xx.git] include/asm-x86/uaccess_64.h ("x86: introduce likely in macro.")
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) &                      \
                           (current_thread_info()->addr_limit.seg)))
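
/*
 * Usage sketch of the pattern get_fs()/set_fs() enable (hypothetical caller):
 * code that wants to run the user-access routines on a kernel pointer widens
 * the address limit temporarily and restores it afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... get_user()/put_user() on kernel addresses ...
 *	set_fs(old_fs);
 */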

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 *
 * "add" computes addr + size and sets the carry flag if the sum wraps past
 * 2^64; the first "sbb %0,%0" turns that carry into an all-ones flag.  The
 * "cmp"/"sbb $0,%0" pair additionally flags the case where the sum is above
 * addr_limit.seg.  The result is 0 only when addr + size neither wraps nor
 * exceeds the current address limit.
 */
#define __range_not_ok(addr, size)                                      \
({                                                                      \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"             \
            : "=&r" (flag), "=r" (roksum)                               \
            : "1" (addr), "g" ((long)(size)),                           \
              "rm" (current_thread_info()->addr_limit.seg));            \
        flag;                                                           \
})

#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};
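
/*
 * In the __get_user_asm()/__put_user_asm() macros below, _ASM_EXTABLE(1b, 3b)
 * emits one such pair: the possibly-faulting "1:" mov and the "3:" fixup code
 * placed in the .fixup section.
 */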

extern int fixup_exception(struct pt_regs *regs);

#define ARCH_HAS_SEARCH_EXTABLE

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
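
/*
 * A sketch of the pattern the comment above describes (hypothetical "ubuf",
 * "val" and "n"): one access_ok() check followed by several unchecked
 * accesses to the same user area:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, n * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(val[i], ubuf + i))
 *			return -EFAULT;
 */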

#define __get_user_x(size, ret, x, ptr)               \
        asm volatile("call __get_user_" #size         \
                     : "=a" (ret),"=d" (x)            \
                     : "0" (ptr))

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

#define get_user(x, ptr)                                                \
({                                                                      \
        unsigned long __val_gu;                                         \
        int __ret_gu;                                                   \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_x(1, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 2:                                                         \
                __get_user_x(2, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 4:                                                         \
                __get_user_x(4, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 8:                                                         \
                __get_user_x(8, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        default:                                                        \
                __get_user_bad();                                       \
                break;                                                  \
        }                                                               \
        (x) = (__force typeof(*(ptr)))__val_gu;                         \
        __ret_gu;                                                       \
})
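
/*
 * Usage sketch (hypothetical "uptr" argument): get_user() checks the address
 * itself, so no separate access_ok() is needed; it returns 0 on success and
 * -EFAULT on a faulting access:
 *
 *	int value;
 *
 *	if (get_user(value, (int __user *)uptr))
 *		return -EFAULT;
 */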

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)                                 \
        asm volatile("call __put_user_" #size                           \
                     :"=a" (ret)                                        \
                     :"c" (ptr),"a" (x)                                 \
                     :"ebx")

#define put_user(x, ptr)                                                \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
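
/*
 * Usage sketch, mirroring get_user() above (hypothetical "uptr" and "value"):
 * put_user() verifies the address; __put_user() skips the check and relies on
 * an earlier access_ok() on the same area:
 *
 *	if (put_user(value, (int __user *)uptr))
 *		return -EFAULT;
 */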

#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        int __pu_err;                                   \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})


#define __put_user_check(x, ptr, size)                          \
({                                                              \
        int __pu_err;                                           \
        typeof(*(ptr)) __user *__pu_addr = (ptr);               \
        switch (size) {                                         \
        case 1:                                                 \
                __put_user_x(1, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 8:                                                 \
                __put_user_x(8, __pu_err, x, __pu_addr);        \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
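
/*
 * In __put_user_asm() above and __get_user_asm() below, "itype" is the
 * instruction size suffix (b/w/l/q), "rtype" the matching register modifier,
 * "ltype" the constraint used for the value operand, and "errno" the value
 * stored in "err" by the fixup code when the access faults.
 */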

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __gu_err;                                           \
        unsigned long __gu_val;                                 \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force typeof(*(ptr)))__gu_val;                 \
        __gu_err;                                               \
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype (x)                            \
                     : "m" (__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                return ret;
        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                return ret;
        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                return ret;
        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
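
/*
 * Usage sketch (hypothetical "struct req" and "uarg" pointer): when the size
 * is a compile-time constant matching one of the cases above, the copy is
 * inlined as a couple of mov instructions; otherwise it falls back to
 * copy_user_generic().  The caller must already have done access_ok(); a
 * non-zero return means the copy faulted:
 *
 *	struct req r;
 *
 *	if (__copy_from_user(&r, uarg, sizeof(r)))
 *		return -EFAULT;
 */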

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                return ret;
        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                return ret;
        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                return ret;
        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "ir", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }

        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "ir", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);
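
/*
 * Note (an interpretation based on the name and the callers below): the
 * "zerorest" argument appears to select whether the remaining destination
 * bytes are zeroed when the copy faults; the sleeping variant passes 1, the
 * inatomic variant passes 0.
 */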

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
                                           unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
                                                    const void __user *src,
                                                    unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}

#endif /* __X86_64_UACCESS_H */