/* linux-2.6-omap-h63xx.git: include/asm-x86/uaccess_64.h */
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define __addr_ok(addr) (!((unsigned long)(addr) &                      \
                           (current_thread_info()->addr_limit.seg)))

#define ARCH_HAS_SEARCH_EXTABLE

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

#define get_user(x, ptr)                                                \
({                                                                      \
        unsigned long __val_gu;                                         \
        int __ret_gu;                                                   \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_x(1, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 2:                                                         \
                __get_user_x(2, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 4:                                                         \
                __get_user_x(4, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 8:                                                         \
                __get_user_x(8, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        default:                                                        \
                __get_user_bad();                                       \
                break;                                                  \
        }                                                               \
        (x) = (__force typeof(*(ptr)))__val_gu;                         \
        __ret_gu;                                                       \
})
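
/*
 * Usage sketch (illustrative, not part of this header): the checked
 * get_user()/put_user() macros verify the user address themselves and
 * return 0 on success or -EFAULT on a faulting access, e.g. in a
 * syscall or ioctl handler (arg here is a hypothetical user pointer):
 *
 *	int val;
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *)arg))
 *		return -EFAULT;
 */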

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)                                 \
        asm volatile("call __put_user_" #size                           \
                     :"=a" (ret)                                        \
                     :"c" (ptr),"a" (x)                                 \
                     :"ebx")

#define put_user(x, ptr)                                                \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
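
/*
 * Minimal sketch of the double-underscore variants (illustrative only):
 * __get_user()/__put_user() skip the range check, so the caller is
 * expected to have validated the pointer with access_ok() first.
 * uptr and val are hypothetical names; access_ok() took a VERIFY_*
 * argument in kernels of this vintage:
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */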

#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        int __pu_err;                                   \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})


#define __put_user_check(x, ptr, size)                          \
({                                                              \
        int __pu_err;                                           \
        typeof(*(ptr)) __user *__pu_addr = (ptr);               \
        switch (size) {                                         \
        case 1:                                                 \
                __put_user_x(1, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 8:                                                 \
                __put_user_x(8, __pu_err, x, __pu_addr);        \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))

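/*
 * Descriptive note on the fault path above: label 1 is the store to the
 * user address; _ASM_EXTABLE(1b, 3b) records it in the exception table,
 * so if that store faults the page-fault handler resumes at label 3 in
 * the .fixup section, which loads the errno operand (%3, normally
 * -EFAULT) into the error variable and jumps back to label 2, i.e. the
 * instruction following the store.
 */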

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __gu_err;                                           \
        unsigned long __gu_val;                                 \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force typeof(*(ptr)))__gu_val;                 \
        __gu_err;                                               \
})

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype (x)                            \
                     : "m" (__m(addr)), "i"(errno), "0"(err))

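/*
 * Descriptive note: the get-side fixup differs from the put side in one
 * respect - besides loading the error code it also xors the destination
 * register with itself, so a faulting read leaves the caller's variable
 * zeroed instead of holding stale data.
 */
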
/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

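/*
 * Usage sketch (illustrative, not part of this header): the checked copy
 * routines return the number of bytes that could NOT be copied, so 0
 * means success. struct foo, karg and uarg are hypothetical names:
 *
 *	struct foo karg;
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */
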
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                return ret;
        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                return ret;
        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                return ret;
        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
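
/*
 * Descriptive note: when the compiler can prove size is a constant,
 * __copy_from_user() above is specialized into one inline move (or two
 * for the unrolled 10- and 16-byte cases); any other size falls back to
 * copy_user_generic(). The last __get_user_asm()/__put_user_asm()
 * argument is the value left in ret when that particular access faults.
 */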

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                return ret;
        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                return ret;
        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                return ret;
        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "ir", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }

        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "ir", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
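
/*
 * Usage sketch (illustrative, not part of this header):
 * strncpy_from_user() returns the length of the copied string on
 * success or -EFAULT on a bad address; clear_user() returns the number
 * of bytes that could not be zeroed. name and uname are hypothetical:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 */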

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
                                           unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
                                                    const void __user *src,
                                                    unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}
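
/*
 * Descriptive note: the _nocache variants copy with non-temporal stores
 * to avoid polluting the CPU cache with data that will not be re-read
 * soon. __copy_from_user_nocache() may sleep (hence might_sleep()) and
 * passes zerorest=1 so the remaining destination bytes are zeroed on a
 * fault; the _inatomic_nocache variant skips both, for callers that
 * cannot sleep.
 */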

#endif /* __X86_64_UACCESS_H */