/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR       ".word"
#define __UA_LA         "la"
#define __UA_ADDU       "addu"
#define __UA_t0         "$8"
#define __UA_t1         "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT      __ua_limit

#define __UA_ADDR       ".dword"
#define __UA_LA         "dla"
#define __UA_ADDU       "daddu"
#define __UA_t0         "$12"
#define __UA_t1         "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
#define USER_DS         ((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)


/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)                                                 \
        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)                                   \
({                                                                      \
        unsigned long __addr = (unsigned long) (addr);                  \
        unsigned long __size = size;                                    \
        unsigned long __mask = mask;                                    \
        unsigned long __ok;                                             \
                                                                        \
        __chk_user_ptr(addr);                                           \
        __ok = (signed long)(__mask & (__addr | (__addr + __size) |     \
                __ua_size(__size)));                                    \
        __ok == 0;                                                      \
})
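
/*
 * Worked example of the arithmetic above (illustrative numbers; 32-bit
 * non-KVM kernel, so the user-mode mask is 0x80000000): checking
 * addr = 0x7fff0000 with a non-constant size = 0x1000 gives
 *
 *      __addr              = 0x7fff0000
 *      __addr + __size     = 0x7fff1000
 *      __ua_size(__size)   = 0x00001000
 *
 * None of the three values has bit 31 set, so masking their OR with
 * 0x80000000 yields 0 and the access is allowed.  A kernel pointer such
 * as 0x80001000 has bit 31 set and fails.  Under KERNEL_DS the mask is
 * 0, so every address passes.
 */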

#define access_ok(type, addr, size)                                     \
        likely(__access_ok((addr), (size), __access_mask))
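
/*
 * Typical use (a minimal sketch; uarg, retval, result and struct
 * my_args are made-up names): validate a user pointer once, then use
 * the cheaper double-underscore accessors on it:
 *
 *      struct my_args __user *uarg;
 *
 *      if (!access_ok(VERIFY_WRITE, uarg, sizeof(*uarg)))
 *              return -EFAULT;
 *      if (__put_user(retval, &uarg->result))
 *              return -EFAULT;
 */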

/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))
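
/*
 * Example (a sketch; dev_status(), dev, status and ustatus are
 * hypothetical, with ustatus an int __user pointer): put_user()
 * performs the access_ok() check itself, so a single store needs no
 * separate validation:
 *
 *      int status = dev_status(dev);
 *
 *      if (put_user(status, ustatus))
 *              return -EFAULT;
 */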

/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
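
/*
 * Example (a sketch; uflags is a hypothetical unsigned int __user
 * pointer): on failure the destination is zeroed, so flags is never
 * left holding stack garbage:
 *
 *      unsigned int flags;
 *
 *      if (get_user(flags, uflags))
 *              return -EFAULT;
 */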

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
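
/*
 * Example (a sketch; uvec and n are hypothetical, with uvec a
 * u32 __user pointer): one access_ok() over the whole array amortizes
 * the check across many __get_user() calls:
 *
 *      u32 sum = 0, tmp;
 *      int i;
 *
 *      if (!access_ok(VERIFY_READ, uvec, n * sizeof(u32)))
 *              return -EFAULT;
 *      for (i = 0; i < n; i++) {
 *              if (__get_user(tmp, uvec + i))
 *                      return -EFAULT;
 *              sum += tmp;
 *      }
 */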

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)                               \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_user_asm(val, "lb", ptr); break;                  \
        case 2: __get_user_asm(val, "lh", ptr); break;                  \
        case 4: __get_user_asm(val, "lw", ptr); break;                  \
        case 8: __GET_USER_DW(val, ptr); break;                         \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        __chk_user_ptr(ptr);                                            \
        __get_user_common((x), size, ptr);                              \
        __gu_err;                                                       \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))            \
                __get_user_common((x), size, __gu_ptr);                 \
                                                                        \
        __gu_err;                                                       \
})

#define __get_user_asm(val, insn, addr)                                 \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_asm_ll32(val, addr)                                  \
{                                                                       \
        union {                                                         \
                unsigned long long      l;                              \
                __typeof__(*(addr))     t;                              \
        } __gu_tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     lw      %1, (%3)                                \n"     \
        "2:     lw      %D1, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
                                                                        \
        (val) = __gu_tmp.t;                                             \
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_nocheck(x, ptr, size)                                \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __chk_user_ptr(ptr);                                            \
        __pu_val = (x);                                                 \
        switch (size) {                                                 \
        case 1: __put_user_asm("sb", ptr); break;                       \
        case 2: __put_user_asm("sh", ptr); break;                       \
        case 4: __put_user_asm("sw", ptr); break;                       \
        case 8: __PUT_USER_DW(ptr); break;                              \
        default: __put_user_unknown(); break;                           \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
                switch (size) {                                         \
                case 1: __put_user_asm("sb", __pu_addr); break;         \
                case 2: __put_user_asm("sh", __pu_addr); break;         \
                case 4: __put_user_asm("sw", __pu_addr); break;         \
                case 8: __PUT_USER_DW(__pu_addr); break;                \
                default: __put_user_unknown(); break;                   \
                }                                                       \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_asm(insn, ptr)                                       \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_asm\n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_user_asm_ll32(ptr)                                        \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     sw      %2, (%3)        # __put_user_asm_ll32   \n"     \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unknown(void);

/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr)       \
        __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
        __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
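
/*
 * Example (a sketch; upkt is a hypothetical u8 __user pointer into a
 * packed, possibly misaligned wire-format buffer): the unaligned
 * variants use ulw/ulh and friends, so they avoid the address-error
 * exception that an ordinary lw on a misaligned address would raise:
 *
 *      u32 seq;
 *
 *      if (get_user_unaligned(seq, (u32 __user *)(upkt + 2)))
 *              return -EFAULT;
 */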

/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
        __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
        __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)                     \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_user_asm(val, "lb", ptr); break;                  \
        case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;       \
        case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;       \
        case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
        default: __get_user_unaligned_unknown(); break;                 \
        }                                                               \
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size)                        \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        __get_user_unaligned_common((x), size, ptr);                    \
        __gu_err;                                                       \
})

#define __get_user_unaligned_check(x,ptr,size)                          \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))            \
                __get_user_unaligned_common((x), size, __gu_ptr);       \
                                                                        \
        __gu_err;                                                       \
})

#define __get_user_unaligned_asm(val, insn, addr)                       \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       "__UA_ADDR "\t1b + 4, 3b                        \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)                        \
{                                                                       \
        unsigned long long __gu_tmp;                                    \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     ulw     %1, (%3)                                \n"     \
        "2:     ulw     %D1, 4(%3)                              \n"     \
        "       move    %0, $0                                  \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp)                             \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_nocheck(x,ptr,size)                        \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        switch (size) {                                                 \
        case 1: __put_user_asm("sb", ptr); break;                       \
        case 2: __put_user_unaligned_asm("ush", ptr); break;            \
        case 4: __put_user_unaligned_asm("usw", ptr); break;            \
        case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
        default: __put_user_unaligned_unknown(); break;                 \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_unaligned_check(x,ptr,size)                          \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
                switch (size) {                                         \
                case 1: __put_user_asm("sb", __pu_addr); break;         \
                case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
                case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
                case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break;      \
                default: __put_user_unaligned_unknown(); break;         \
                }                                                       \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_unaligned_asm(insn, ptr)                             \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_user_unaligned_asm_ll32(ptr)                              \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unaligned_unknown(void);

/*
 * We're generating jumps to subroutines which will be outside the range of
 * jump instructions
 */
#ifdef MODULE
#define __MODULE_JAL(destination)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_LA "\t$1, " #destination "\n\t"                            \
        "jalr\t$1\n\t"                                                  \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination)                                       \
        "jal\t" #destination "\n\t"
#endif

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to, from, n)                              \
({                                                                      \
        register void __user *__cu_to_r __asm__("$4");                  \
        register const void *__cu_from_r __asm__("$5");                 \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(__copy_user)                                       \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)                                     \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
        __cu_len;                                                       \
})
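
/*
 * Example (a sketch; ubuf, kbuf and len are hypothetical): after one
 * explicit access_ok(), __copy_to_user() skips the per-call check:
 *
 *      if (!access_ok(VERIFY_WRITE, ubuf, len))
 *              return -EFAULT;
 *      if (__copy_to_user(ubuf, kbuf, len))
 *              return -EFAULT;
 */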

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)                            \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
        __cu_len;                                                       \
})

#define __copy_from_user_inatomic(to, from, n)                          \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
                                                    __cu_len);          \
        __cu_len;                                                       \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)                                       \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {               \
                might_fault();                                          \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
                                                 __cu_len);             \
        }                                                               \
        __cu_len;                                                       \
})
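
/*
 * Example (a sketch of an ioctl-style "get" path; fill_info, dev, uarg
 * and struct my_info are hypothetical names): the return value is the
 * number of uncopied bytes, so nonzero means failure:
 *
 *      struct my_info info;
 *
 *      fill_info(dev, &info);
 *      if (copy_to_user(uarg, &info, sizeof(info)))
 *              return -EFAULT;
 *      return 0;
 */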

#define __invoke_copy_from_user(to, from, n)                            \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_user_inatomic(to, from, n)                   \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user_inatomic)                              \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)                                   \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
                                           __cu_len);                   \
        __cu_len;                                                       \
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)                                     \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {              \
                might_fault();                                          \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        }                                                               \
        __cu_len;                                                       \
})
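
/*
 * Example (a sketch of a write()-style path; kbuf, ubuf and count are
 * hypothetical): a short copy reports how many bytes were left, which
 * callers usually collapse to -EFAULT:
 *
 *      if (count > sizeof(kbuf))
 *              count = sizeof(kbuf);
 *      if (copy_from_user(kbuf, ubuf, count))
 *              return -EFAULT;
 */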

#define __copy_in_user(to, from, n)                                     \
({                                                                      \
        void __user *__cu_to;                                           \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
                                           __cu_len);                   \
        __cu_len;                                                       \
})

#define copy_in_user(to, from, n)                                       \
({                                                                      \
        void __user *__cu_to;                                           \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&       \
                   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {       \
                might_fault();                                          \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        }                                                               \
        __cu_len;                                                       \
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
        __kernel_size_t res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, $0\n\t"
                "move\t$6, %2\n\t"
                __MODULE_JAL(__bzero)
                "move\t%0, $6"
                : "=r" (res)
                : "r" (addr), "r" (size)
                : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

        return res;
}

#define clear_user(addr,n)                                              \
({                                                                      \
        void __user * __cl_addr = (addr);                               \
        unsigned long __cl_size = (n);                                  \
        if (__cl_size && access_ok(VERIFY_WRITE,                        \
                                        __cl_addr, __cl_size))          \
                __cl_size = __clear_user(__cl_addr, __cl_size);         \
        __cl_size;                                                      \
})
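
/*
 * Example (a sketch; ubuf, kbuf, used and len are hypothetical): zero
 * the tail of a user buffer after filling only part of it:
 *
 *      if (copy_to_user(ubuf, kbuf, used))
 *              return -EFAULT;
 *      if (clear_user(ubuf + used, len - used))
 *              return -EFAULT;
 */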

/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

        return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

        return res;
}
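
/*
 * Example (a sketch; uname is a hypothetical user pointer): the result
 * must be checked both for fault and for truncation, since a result
 * equal to the buffer size means no NUL was copied:
 *
 *      char name[32];
 *      long len;
 *
 *      len = strncpy_from_user(name, uname, sizeof(name));
 *      if (len < 0)
 *              return len;
 *      if (len == sizeof(name))
 *              return -ENAMETOOLONG;
 */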

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", __UA_t0, "$31");

        return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", __UA_t0, "$31");

        return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", __UA_t0, "$31");

        return res;
}

/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n:   The maximum valid length.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", __UA_t0, "$31");

        return res;
}
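
/*
 * Example (a sketch; ustr is a hypothetical user pointer): size a
 * string before copying it, treating 0 (fault) and over-limit results
 * as errors:
 *
 *      long len = strnlen_user(ustr, PATH_MAX);
 *
 *      if (len == 0 || len > PATH_MAX)
 *              return -EFAULT;
 */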

struct exception_table_entry
{
        unsigned long insn;
        unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);
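
/*
 * Conceptually (a sketch, not the exact implementation), the fault
 * handler resolves a faulting user access by looking the exception PC
 * up in the table built by the __ex_table entries above and, if an
 * entry is found, resuming at its fixup address:
 *
 *      fixup = search_exception_tables(exception_epc(regs));
 *      if (fixup) {
 *              regs->cp0_epc = fixup->nextinsn;
 *              return 1;
 *      }
 *      return 0;
 */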

#endif /* _ASM_UACCESS_H */