x86, mm: Redesign get_user with a __builtin_choose_expr hack
arch/x86/include/asm/uaccess.h
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)         \
        ((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
 *
 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
 */

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"             \
            : "=&r" (flag), "=r" (roksum)                               \
            : "1" (addr), "g" ((long)(size)),                           \
              "rm" (limit));                                            \
        flag;                                                           \
})
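
/*
 * A plain-C sketch of what the asm above computes, for illustration
 * only ('range_not_ok_c' is a hypothetical helper, not part of this
 * header).  The "add; sbb %0,%0" pair folds the carry out of
 * addr + size (the bit that needs u33/u65 arithmetic) into flag, and
 * the "cmp; sbb $0,%0" pair also makes flag nonzero when the sum
 * exceeds the limit:
 *
 *      static inline unsigned long range_not_ok_c(unsigned long addr,
 *                                                 unsigned long size,
 *                                                 unsigned long limit)
 *      {
 *              unsigned long sum = addr + size;
 *              int carry = sum < addr;
 *              return (carry || sum > limit) ? 1 : 0;
 *      }
 */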

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
        (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
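
/*
 * Illustrative usage, with hypothetical 'buf' and 'len' (not part of
 * this header): check the whole range once, then use the unchecked
 * __get_user()/__put_user() variants on it.
 *
 *      if (!access_ok(VERIFY_WRITE, buf, len))
 *              return -EFAULT;
 */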

/*
 * The exception table consists of pairs of addresses relative to the
 * exception table entry itself: the first is the address of an
 * instruction that is allowed to fault, and the second is the address
 * at which the program should continue.  No registers are modified,
 * so it is entirely up to the continuation code to figure out what to
 * do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry {
        int insn, fixup;
};
/* This is not the generic standard exception_table_entry format */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

extern int fixup_exception(struct pt_regs *regs);
extern int early_fixup_exception(unsigned long *ip);
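
/*
 * Since the entries are relative, a faulting-instruction address is
 * recovered by adding the field's value to the field's own address.
 * A sketch of the resolution (mirroring the helper the extable code
 * uses; shown here for illustration):
 *
 *      static inline unsigned long
 *      ex_insn_addr(const struct exception_table_entry *x)
 *      {
 *              return (unsigned long)&x->insn + x->insn;
 *      }
 */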

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
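
/*
 * For illustration: on 32-bit, __inttype(long long) picks unsigned
 * long long because sizeof(long long) > sizeof(0UL); every type no
 * larger than long widens to plain unsigned long, e.g.
 *
 *      _Static_assert(sizeof(__inttype(char)) == sizeof(unsigned long),
 *                     "small types widen to unsigned long");
 */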

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        int __ret_gu;                                                   \
        register __inttype(*(ptr)) __val_gu asm("%edx");                \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        asm volatile("call __get_user_%P3"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu)                 \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__typeof__(*(ptr))) __val_gu;                            \
        __ret_gu;                                                       \
})
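
/*
 * The register asm pins __val_gu to %edx (%rdx on 64-bit), the register
 * in which the __get_user_N stubs return the value; the error code
 * comes back in %eax, hence the "=a" output.  Illustrative usage with
 * a hypothetical __user pointer 'uaddr':
 *
 *      int val;
 *      if (get_user(val, (int __user *)uaddr))
 *              return -EFAULT;
 */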

#define __put_user_x(size, x, ptr, __ret_pu)                    \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")


#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)                        \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
                     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)                                  \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        movl %%eax,0(%1)\n"                     \
                     "2:        movl %%edx,4(%1)\n"                     \
                     "3: " ASM_CLAC "\n"                                \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
                     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)                         \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
        __put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)  \
        __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax; clobbers %ebx/%rbx.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        might_fault();                                          \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 8:                                                 \
                __put_user_x8(__pu_val, ptr, __ret_pu);         \
                break;                                          \
        default:                                                \
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
        __ret_pu;                                               \
})
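
/*
 * The default case emits "call __put_user_X"; no such symbol exists,
 * so an unsupported size becomes a link-time error instead of a silent
 * runtime bug.  Illustrative usage (hypothetical 'uaddr'):
 *
 *      if (put_user(42, (int __user *)uaddr))
 *              return -EFAULT;
 */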

#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
                                   errret);                             \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#define __put_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)      (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)                   (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
         __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_u64(x, ptr, retval, errret);             \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))
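
/*
 * Fault path of the asm above: label 1 is the load that may fault; on
 * a fault, the exception table redirects to label 3 in .fixup, which
 * stores errret in err, zeroes the destination register, and jumps
 * back to label 2 just past the load.
 */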

#define __get_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : ltype(x) : "m" (__m(addr)))
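
/*
 * Note that the _ex variants carry no inline .fixup code and no error
 * output: a fault is recorded in current_thread_info()->uaccess_err by
 * the trap handler and only surfaces in uaccess_catch(), so they are
 * safe only inside a uaccess_try/uaccess_catch region (see below).
 */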

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __pu_err;                                           \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __pu_err;                                               \
})

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
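
/*
 * Presumably the point of __large_struct (per the FIXME, even its
 * authors were unsure of the details): making the "m" constraint refer
 * to a large object keeps gcc from assuming the asm touches only a
 * single word at that address.
 */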

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        mov"itype" %"rtype"1,%2\n"              \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try     do {                                            \
        current_thread_info()->uaccess_err = 0;                         \
        stac();                                                         \
        barrier();

#define uaccess_catch(err)                                              \
        clac();                                                         \
        (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
} while (0)
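
/*
 * Between stac() and clac(), a user-access fault does not branch to a
 * local fixup; the trap handler sets ->uaccess_err, and uaccess_catch()
 * folds it into err as -EFAULT when the region closes.
 */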

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
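
/*
 * Illustrative pairing with access_ok() (hypothetical 'uaddr'):
 *
 *      int a, b;
 *      if (!access_ok(VERIFY_READ, uaddr, 2 * sizeof(int)))
 *              return -EFAULT;
 *      if (__get_user(a, uaddr) || __get_user(b, uaddr + 1))
 *              return -EFAULT;
 *
 * __put_user() below pairs with access_ok(VERIFY_WRITE, ...) the same
 * way.
 */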

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *      get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try            uaccess_try
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr)     do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr)                                             \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
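
/*
 * A fuller illustrative region (hypothetical 'frame' and fields, in
 * the style of the signal-frame setup code):
 *
 *      int err = 0;
 *      put_user_try {
 *              put_user_ex(sig, &frame->sig);
 *              put_user_ex(0, &frame->pretcode);
 *      } put_user_catch(err);
 *      if (err)
 *              return -EFAULT;
 */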

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
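
/*
 * clear_user() returns the number of bytes that could not be cleared,
 * so zero means success.  Illustrative use (hypothetical 'ubuf'):
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */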

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

#endif /* _ASM_X86_UACCESS_H */