#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define ARCH_HAS_SEARCH_EXTABLE

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);
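
/*
 * The __put_user_<size>() declarations above are out-of-line assembly
 * stubs (in arch/x86/lib) that perform both the range check and the
 * store for one operand size.  __put_user_bad() is the catch-all for an
 * unsupported operand size, so a bad put_user() is diagnosed instead of
 * being silently miscompiled.
 */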

#define __put_user_x(size, ret, x, ptr)					\
        asm volatile("call __put_user_" #size				\
                     : "=a" (ret)					\
                     : "c" (ptr), "a" (x)				\
                     : "ebx")
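
/*
 * The constraints spell out the stub calling convention: the user
 * pointer goes in %rcx ("c"), the value in %rax ("a"), and the error
 * code comes back in %eax ("=a").  %ebx is listed as a clobber because
 * the stubs may use it as scratch.
 */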

#define put_user(x, ptr)						\
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)						\
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)						\
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
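
/*
 * Typical usage (hypothetical caller): put_user() validates the pointer
 * itself, while the double-underscore forms assume access_ok() has
 * already been done:
 *
 *	int __user *uptr = ...;
 *	long val;
 *
 *	if (put_user(42, uptr))		/\* checked store *\/
 *		return -EFAULT;
 *	if (__get_user(val, uptr))	/\* caller validated uptr *\/
 *		return -EFAULT;
 */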

#define __put_user_nocheck(x, ptr, size)		\
({							\
        int __pu_err;					\
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __pu_err;					\
})


#define __put_user_check(x, ptr, size)				\
({								\
        int __pu_err;						\
        typeof(*(ptr)) __user *__pu_addr = (ptr);		\
        switch (size) {						\
        case 1:							\
                __put_user_x(1, __pu_err, x, __pu_addr);	\
                break;						\
        case 2:							\
                __put_user_x(2, __pu_err, x, __pu_addr);	\
                break;						\
        case 4:							\
                __put_user_x(4, __pu_err, x, __pu_addr);	\
                break;						\
        case 8:							\
                __put_user_x(8, __pu_err, x, __pu_addr);	\
                break;						\
        default:						\
                __put_user_bad();				\
        }							\
        __pu_err;						\
})
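
/*
 * Since "size" is sizeof(*(ptr)), a compile-time constant, the switch
 * above folds down to a single __put_user_x() call; no size dispatch
 * survives in the generated code.
 */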

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
        retval = 0;							\
        __chk_user_ptr(ptr);						\
        switch (size) {							\
        case 1:								\
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
                break;							\
        case 2:								\
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
                break;							\
        case 4:								\
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
                break;							\
        case 8:								\
                __put_user_asm(x, ptr, retval, "q", "", "er", errret);	\
                break;							\
        default:							\
                __put_user_bad();					\
        }								\
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
        asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
                     "2:\n"						\
                     ".section .fixup, \"ax\"\n"			\
                     "3:	mov %3,%0\n"				\
                     "	jmp 2b\n"					\
                     ".previous\n"					\
                     _ASM_EXTABLE(1b, 3b)				\
                     : "=r"(err)					\
                     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
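
/*
 * Fault handling: label 1 is the store that may fault.  _ASM_EXTABLE
 * maps a fault at 1 to the fixup code at label 3, which loads the error
 * value into "err" and jumps back past the store; the fast path never
 * touches the .fixup section, so a successful store costs a single mov.
 * The "er" constraint used for 8-byte operands restricts immediates to
 * sign-extended 32-bit values, the only immediate form movq can encode.
 */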

#define __get_user_nocheck(x, ptr, size)			\
({								\
        int __gu_err;						\
        unsigned long __gu_val;					\
        __get_user_size(__gu_val, (ptr), (size), __gu_err);	\
        (x) = (__force typeof(*(ptr)))__gu_val;			\
        __gu_err;						\
})

#define __get_user_size(x, ptr, size, retval)				\
do {									\
        retval = 0;							\
        __chk_user_ptr(ptr);						\
        switch (size) {							\
        case 1:								\
                __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
                break;							\
        case 2:								\
                __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
                break;							\
        case 4:								\
                __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
                break;							\
        case 8:								\
                __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT);	\
                break;							\
        default:							\
                (x) = __get_user_bad();					\
        }								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
        asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
                     "2:\n"						\
                     ".section .fixup, \"ax\"\n"			\
                     "3:	mov %3,%0\n"				\
                     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
                     "	jmp 2b\n"					\
                     ".previous\n"					\
                     _ASM_EXTABLE(1b, 3b)				\
                     : "=r" (err), ltype (x)				\
                     : "m" (__m(addr)), "i"(errno), "0"(err))
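
/*
 * Note the extra xor in the fixup path: on a fault the destination
 * register is zeroed as well as the error being set, so a failed
 * __get_user() never leaks whatever stale value the register happened
 * to hold.
 */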

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
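
/*
 * The out-of-line copy_*_user() variants above validate their user
 * pointers; the inline __copy_*() helpers below do not.  All of them
 * return the number of bytes that could NOT be copied, 0 on success.
 * Hypothetical caller:
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */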

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:
                __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                return ret;
        case 2:
                __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                return ret;
        case 4:
                __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                return ret;
        case 8:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
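
/*
 * The last argument of each __get_user_asm() above is the error value,
 * i.e. the number of bytes presumed not copied when that move faults.
 * The odd sizes 10 and 16 are split into an 8-byte move followed by a
 * 2- or 8-byte move, each with its own fault bookkeeping.
 */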

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:
                __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                               ret, "b", "b", "iq", 1);
                return ret;
        case 2:
                __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 4:
                __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                               ret, "l", "k", "ir", 4);
                return ret;
        case 8:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 10);
                if (unlikely(ret))
                        return ret;
                asm("" : : : "memory");
                __put_user_asm(((u16 *)src)[4], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 16);
                if (unlikely(ret))
                        return ret;
                asm("" : : : "memory");
                __put_user_asm(((u64 *)src)[1], 1 + (u64 __user *)dst,
                               ret, "q", "", "er", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}
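
/*
 * The empty asm("" : : : "memory") between the two stores is a compiler
 * barrier; it keeps gcc from reordering or combining memory accesses
 * across it, so the second store really happens after the first one has
 * been seen to succeed.
 */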

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "er", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}
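
/*
 * User-to-user copies bounce each item through a register-sized kernel
 * temporary: load from src with fault handling and, only if that
 * succeeded, store to dst with fault handling.
 */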

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
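
/*
 * The string and clear helpers follow the same convention as the copy
 * routines: the plain-named function validates the user pointer, the
 * double-underscore variant assumes the caller already has.
 */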

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
                                           unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}
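
/*
 * The nocache variants copy with non-temporal stores so that a large
 * copy does not evict useful data from the CPU caches.  The zerorest
 * flag asks __copy_user_nocache() to zero the remaining destination
 * bytes on a fault; the _inatomic variant below skips might_sleep()
 * and the zeroing.
 */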

static inline int __copy_from_user_inatomic_nocache(void *dst,
                                                    const void __user *src,
                                                    unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}

#endif /* __X86_64_UACCESS_H */