#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#ifdef __powerpc64__
#define KERNEL_DS       MAKE_MM_SEG(0UL)
#define USER_DS         MAKE_MM_SEG(0xf000000000000000UL)
#else
#define KERNEL_DS       MAKE_MM_SEG(~0UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.fs)
#define set_fs(val)     (current->thread.fs = (val))

#define segment_eq(a, b)        ((a).seg == (b).seg)

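/*
 * Illustrative sketch (not part of the original header) of the classic
 * get_fs()/set_fs() pattern: temporarily switch to KERNEL_DS so a kernel
 * buffer can be handed to code that expects a user pointer, then restore
 * the old segment.  The helper name, buffer and length are hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      err = some_helper_expecting_user_ptr((void __user *)kernel_buf, len);
 *      set_fs(old_fs);
 */
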
#ifdef __powerpc64__
/*
 * Use the alpha trick for checking ranges:
 *
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * We don't have to check for high bits in (addr+size) because the first
 * two checks force the maximum result to be below the start of the
 * kernel region.
 */
#define __access_ok(addr, size, segment)        \
        (((segment).seg & (addr | size)) == 0)

#else

#define __access_ok(addr, size, segment)        \
        (((addr) <= (segment).seg) &&           \
         (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)             \
        (__chk_user_ptr(addr),                  \
         __access_ok((__force unsigned long)(addr), (size), get_fs()))

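/*
 * Usage sketch (illustrative only): callers validate a range once with
 * access_ok() and may then use the unchecked "__"-prefixed helpers on it.
 * 'kbuf', 'ubuf' and 'len' are hypothetical names.
 *
 *      if (!access_ok(VERIFY_READ, ubuf, len))
 *              return -EFAULT;
 *      if (__copy_from_user(kbuf, ubuf, len))
 *              return -EFAULT;
 */
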
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn;
        unsigned long fixup;
};

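/*
 * Rough sketch (illustration only, not code from this file) of how the fault
 * handler uses the table: when a fault hits an address recorded in 'insn',
 * the saved program counter is redirected to the matching 'fixup' stub and
 * execution resumes there.
 *
 *      const struct exception_table_entry *entry;
 *
 *      entry = search_exception_tables(regs->nip);
 *      if (entry)
 *              regs->nip = entry->fixup;
 */
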
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions are versions that
 * allow access of 64-bit data. The "get_user" functions do not
 * properly handle 64-bit data because the value gets downcast to a long.
 * The "put_user" functions already handle 64-bit data properly, but we add
 * "user64" versions for completeness.
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#ifndef __powerpc64__
#define __get_user64(x, ptr) \
        __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user64(x, ptr) __put_user(x, ptr)
#endif

#ifdef __powerpc64__
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#endif

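/*
 * Minimal usage sketch for the checked macros above; 'arg' is a hypothetical
 * user pointer.  Both get_user() and put_user() return 0 on success and
 * -EFAULT on a faulting access.
 *
 *      int value;
 *
 *      if (get_user(value, (int __user *)arg))
 *              return -EFAULT;
 *      value *= 2;
 *      if (put_user(value, (int __user *)arg))
 *              return -EFAULT;
 */
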
extern long __put_user_bad(void);

#ifdef __powerpc64__
#define __EX_TABLE_ALIGN        "3"
#define __EX_TABLE_TYPE         "llong"
#else
#define __EX_TABLE_ALIGN        "2"
#define __EX_TABLE_TYPE         "long"
#endif

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)                        \
        __asm__ __volatile__(                                   \
                "1:     " op " %1,0(%2) # put_user\n"           \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     li %0,%3\n"                             \
                "       b 2b\n"                                 \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align " __EX_TABLE_ALIGN "\n"          \
                "       ."__EX_TABLE_TYPE" 1b,3b\n"             \
                ".previous"                                     \
                : "=r" (err)                                    \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifndef __powerpc64__
#define __put_user_asm2(x, addr, err)                           \
        __asm__ __volatile__(                                   \
                "1:     stw %1,0(%2)\n"                         \
                "2:     stw %1+1,4(%2)\n"                       \
                "3:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "4:     li %0,%3\n"                             \
                "       b 3b\n"                                 \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align " __EX_TABLE_ALIGN "\n"          \
                "       ." __EX_TABLE_TYPE " 1b,4b\n"           \
                "       ." __EX_TABLE_TYPE " 2b,4b\n"           \
                ".previous"                                     \
                : "=r" (err)                                    \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#else /* __powerpc64__ */
#define __put_user_asm2(x, ptr, retval)                         \
          __put_user_asm(x, ptr, retval, "std")
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)                   \
do {                                                            \
        retval = 0;                                             \
        switch (size) {                                         \
          case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
          case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
          case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
          case 8: __put_user_asm2(x, ptr, retval); break;       \
          default: __put_user_bad();                            \
        }                                                       \
} while (0)

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __pu_err;                                          \
        might_sleep();                                          \
        __chk_user_ptr(ptr);                                    \
        __put_user_size((x), (ptr), (size), __pu_err);          \
        __pu_err;                                               \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        might_sleep();                                                  \
        if (access_ok(VERIFY_WRITE, __pu_addr, size))                   \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
})

extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)                \
        __asm__ __volatile__(                           \
                "1:     "op" %1,0(%2)   # get_user\n"   \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       b 2b\n"                         \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n"           \
                "       .align "__EX_TABLE_ALIGN "\n"   \
                "       ." __EX_TABLE_TYPE " 1b,3b\n"   \
                ".previous"                             \
                : "=r" (err), "=r" (x)                  \
                : "b" (addr), "i" (-EFAULT), "0" (err))

#ifndef __powerpc64__
#define __get_user_asm2(x, addr, err)                   \
        __asm__ __volatile__(                           \
                "1:     lwz %1,0(%2)\n"                 \
                "2:     lwz %1+1,4(%2)\n"               \
                "3:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "4:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       li %1+1,0\n"                    \
                "       b 3b\n"                         \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n"           \
                "       .align " __EX_TABLE_ALIGN "\n"  \
                "       ." __EX_TABLE_TYPE " 1b,4b\n"   \
                "       ." __EX_TABLE_TYPE " 2b,4b\n"   \
                ".previous"                             \
                : "=r" (err), "=&r" (x)                 \
                : "b" (addr), "i" (-EFAULT), "0" (err))
#else
#define __get_user_asm2(x, addr, err)                   \
        __get_user_asm(x, addr, err, "ld")
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)                   \
do {                                                            \
        retval = 0;                                             \
        __chk_user_ptr(ptr);                                    \
        if (size > sizeof(x))                                   \
                (x) = __get_user_bad();                         \
        switch (size) {                                         \
        case 1: __get_user_asm(x, ptr, retval, "lbz"); break;   \
        case 2: __get_user_asm(x, ptr, retval, "lhz"); break;   \
        case 4: __get_user_asm(x, ptr, retval, "lwz"); break;   \
        case 8: __get_user_asm2(x, ptr, retval);  break;        \
        default: (x) = __get_user_bad();                        \
        }                                                       \
} while (0)

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err;                                          \
        unsigned long __gu_val;                                 \
        __chk_user_ptr(ptr);                                    \
        might_sleep();                                          \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)                      \
({                                                              \
        long __gu_err;                                          \
        long long __gu_val;                                     \
        __chk_user_ptr(ptr);                                    \
        might_sleep();                                          \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT;                                        \
        unsigned long  __gu_val = 0;                                    \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
        might_sleep();                                                  \
        if (access_ok(VERIFY_READ, __gu_addr, (size)))                  \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
        __gu_err;                                                       \
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);

#ifndef __powerpc64__
extern inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long over;

        if (access_ok(VERIFY_READ, from, n))
                return __copy_tofrom_user((__force void __user *)to, from, n);
        if ((unsigned long)from < TASK_SIZE) {
                over = (unsigned long)from + n - TASK_SIZE;
                return __copy_tofrom_user((__force void __user *)to, from,
                                n - over) + over;
        }
        return n;
}

extern inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long over;

        if (access_ok(VERIFY_WRITE, to, n))
                return __copy_tofrom_user(to, (__force void __user *)from, n);
        if ((unsigned long)to < TASK_SIZE) {
                over = (unsigned long)to + n - TASK_SIZE;
                return __copy_tofrom_user(to, (__force void __user *)from,
                                n - over) + over;
        }
        return n;
}

#else /* __powerpc64__ */

static inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        if (__builtin_constant_p(n) && (n <= 8)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret);
                        break;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret);
                        break;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret);
                        break;
                case 8:
                        __get_user_size(*(u64 *)to, from, 8, ret);
                        break;
                }
                return (ret == -EFAULT) ? n : 0;
        }
        return __copy_tofrom_user((__force void __user *) to, from, n);
}

static inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n) && (n <= 8)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
                        break;
                case 2:
                        __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
                        break;
                case 4:
                        __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
                        break;
                case 8:
                        __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
                        break;
                }
                return (ret == -EFAULT) ? n : 0;
        }
        return __copy_tofrom_user(to, (__force const void __user *) from, n);
}

#endif /* __powerpc64__ */

static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long size)
{
        might_sleep();
#ifndef __powerpc64__
        return __copy_tofrom_user((__force void __user *)to, from, size);
#else /* __powerpc64__ */
        return __copy_from_user_inatomic(to, from, size);
#endif /* __powerpc64__ */
}

static inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long size)
{
        might_sleep();
#ifndef __powerpc64__
        return __copy_tofrom_user(to, (__force void __user *)from, size);
#else /* __powerpc64__ */
        return __copy_to_user_inatomic(to, from, size);
#endif /* __powerpc64__ */
}

#ifndef __powerpc64__
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#else /* __powerpc64__ */
#define __copy_in_user(to, from, size) \
        __copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
                                    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
                                  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
                                  unsigned long n);
#endif /* __powerpc64__ */

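/*
 * Usage sketch for the copy routines; they return the number of bytes that
 * could NOT be copied (0 means complete success), not an errno.  'kbuf',
 * 'ubuf' and 'len' are hypothetical.
 *
 *      if (copy_from_user(kbuf, ubuf, len))
 *              return -EFAULT;
 *      ...
 *      if (copy_to_user(ubuf, kbuf, len))
 *              return -EFAULT;
 */
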
extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
        might_sleep();
        if (likely(access_ok(VERIFY_WRITE, addr, size)))
                return __clear_user(addr, size);
#ifndef __powerpc64__
        if ((unsigned long)addr < TASK_SIZE) {
                unsigned long over = (unsigned long)addr + size - TASK_SIZE;
                return __clear_user(addr, size - over) + over;
        }
#endif /* __powerpc64__ */
        return size;
}

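/*
 * clear_user() follows the same convention as the copy routines: the return
 * value is the number of bytes that could not be zeroed.  Illustrative only;
 * 'ubuf' and 'len' are hypothetical.
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */
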
extern int __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long strncpy_from_user(char *dst, const char __user *src,
                long count)
{
        might_sleep();
        if (likely(access_ok(VERIFY_READ, src, 1)))
                return __strncpy_from_user(dst, src, count);
        return -EFAULT;
}

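/*
 * Illustrative use of strncpy_from_user(); a negative return means the
 * source could not be accessed, otherwise it is (roughly) the number of
 * bytes copied.  The buffer and its size are hypothetical.
 *
 *      char name[32];
 *      long copied = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *      if (copied < 0)
 *              return -EFAULT;
 *      name[copied] = '\0';
 */
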
/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 for error
 */
#ifndef __powerpc64__
extern int __strnlen_user(const char __user *str, long len, unsigned long top);
#else /* __powerpc64__ */
extern int __strnlen_user(const char __user *str, long len);
#endif /* __powerpc64__ */

/*
 * Returns the length of the string at str (including the null byte),
 * or 0 if we hit a page we can't access,
 * or something > len if we didn't find a null byte.
 *
 * The `top' parameter to __strnlen_user is to make sure that
 * we can never overflow from the user area into kernel space.
 */
static inline int strnlen_user(const char __user *str, long len)
{
#ifndef __powerpc64__
        unsigned long top = current->thread.fs.seg;

        if ((unsigned long)str > top)
                return 0;
        return __strnlen_user(str, len, top);
#else /* __powerpc64__ */
        might_sleep();
        if (likely(access_ok(VERIFY_READ, str, 1)))
                return __strnlen_user(str, len);
        return 0;
#endif /* __powerpc64__ */
}

#define strlen_user(str)        strnlen_user((str), 0x7ffffffe)

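/*
 * Illustrative check using strnlen_user(): 0 means the string was not
 * accessible, a value larger than the limit means no terminating NUL was
 * found within it.  'ustr' and the limit are hypothetical.
 *
 *      long len = strnlen_user(ustr, PATH_MAX);
 *
 *      if (len == 0 || len > PATH_MAX)
 *              return -EFAULT;
 */
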
#endif  /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif  /* _ARCH_POWERPC_UACCESS_H */