kill __copy_from_user_nocache()
Author:     Al Viro <viro@zeniv.linux.org.uk>
AuthorDate: Sat, 25 Mar 2017 22:47:28 +0000 (18:47 -0400)
Commit:     Al Viro <viro@zeniv.linux.org.uk>
CommitDate: Tue, 28 Mar 2017 22:24:05 +0000 (18:24 -0400)
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
arch/x86/lib/usercopy_32.c
include/linux/uaccess.h
lib/iov_iter.c
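
In short: the only remaining callers of __copy_from_user_nocache() are the two iov_iter nocache helpers, and they can use __copy_from_user_inatomic_nocache() directly. The commit therefore deletes the wrapper from the x86-32 and x86-64 headers and from the generic fallback in <linux/uaccess.h>, together with the now-unused 32-bit zeroing backend (__copy_from_user_ll_nocache() and __copy_user_zeroing_intel_nocache()). A minimal caller-side sketch of the substitution (identifiers as in the diff below; the surrounding setup is hypothetical):

	/* before this commit: the wrapper adds a might_fault() debugging
	 * check and, on a fault, may zero the uncopied destination tail */
	left = __copy_from_user_nocache(to, from, n);

	/* after: the inatomic variant is called directly; it skips
	 * might_fault() and leaves the destination tail untouched */
	left = __copy_from_user_inatomic_nocache(to, from, n);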

diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 5268ecceea966d2aa30bbb44de7e3b03403609fd..19e6c050c438cc42e196876d0e3d695780a39535 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -14,8 +14,6 @@ unsigned long __must_check __copy_from_user_ll
                (void *to, const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nozero
                (void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache
-               (void *to, const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache_nozero
                (void *to, const void __user *from, unsigned long n);
 
@@ -119,34 +117,6 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
        return __copy_from_user_ll(to, from, n);
 }
 
-static __always_inline unsigned long __copy_from_user_nocache(void *to,
-                               const void __user *from, unsigned long n)
-{
-       might_fault();
-       if (__builtin_constant_p(n)) {
-               unsigned long ret;
-
-               switch (n) {
-               case 1:
-                       __uaccess_begin();
-                       __get_user_size(*(u8 *)to, from, 1, ret, 1);
-                       __uaccess_end();
-                       return ret;
-               case 2:
-                       __uaccess_begin();
-                       __get_user_size(*(u16 *)to, from, 2, ret, 2);
-                       __uaccess_end();
-                       return ret;
-               case 4:
-                       __uaccess_begin();
-                       __get_user_size(*(u32 *)to, from, 4, ret, 4);
-                       __uaccess_end();
-                       return ret;
-               }
-       }
-       return __copy_from_user_ll_nocache(to, from, n);
-}
-
 static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
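
For context, the surviving 32-bit variant whose signature the trailing context shows is only a thin wrapper; at the time it read roughly as follows (a sketch, not part of this diff):

	static __always_inline unsigned long
	__copy_from_user_inatomic_nocache(void *to, const void __user *from,
					  unsigned long n)
	{
		/* no might_fault(): callers may be in atomic context;
		 * "nozero": the destination is not cleared on a fault */
		return __copy_from_user_ll_nocache_nozero(to, from, n);
	}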
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 142f0f1230beee5f3868442863f9c714c429d844..242936b0cb4b650ff9a57e48558bab067c5ed42b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -260,14 +260,6 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);
 
-static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
-{
-       might_fault();
-       kasan_check_write(dst, size);
-       return __copy_user_nocache(dst, src, size, 1);
-}
-
 static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
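
The surviving 64-bit variant differs from the deleted one in two ways: it drops might_fault() and passes zerorest = 0 instead of 1 to __copy_user_nocache(), so a faulting copy leaves the tail of the destination untouched. Roughly (a sketch reconstructed from the surrounding code, not part of this diff):

	static inline int
	__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
					  unsigned size)
	{
		kasan_check_write(dst, size);
		return __copy_user_nocache(dst, src, size, 0);  /* zerorest = 0 */
	}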
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 1f65ff6540f076536b43fb29a0e430aa5efac571..02aa7aa8b9f3eb8c46185d1feb2a9c5d193481ad 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -293,105 +293,6 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
        return size;
 }
 
-/*
- * Non Temporal Hint version of __copy_user_zeroing_intel.  It is cache aware.
- * hyoshiok@miraclelinux.com
- */
-
-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
-                               const void __user *from, unsigned long size)
-{
-       int d0, d1;
-
-       __asm__ __volatile__(
-              "        .align 2,0x90\n"
-              "0:      movl 32(%4), %%eax\n"
-              "        cmpl $67, %0\n"
-              "        jbe 2f\n"
-              "1:      movl 64(%4), %%eax\n"
-              "        .align 2,0x90\n"
-              "2:      movl 0(%4), %%eax\n"
-              "21:     movl 4(%4), %%edx\n"
-              "        movnti %%eax, 0(%3)\n"
-              "        movnti %%edx, 4(%3)\n"
-              "3:      movl 8(%4), %%eax\n"
-              "31:     movl 12(%4),%%edx\n"
-              "        movnti %%eax, 8(%3)\n"
-              "        movnti %%edx, 12(%3)\n"
-              "4:      movl 16(%4), %%eax\n"
-              "41:     movl 20(%4), %%edx\n"
-              "        movnti %%eax, 16(%3)\n"
-              "        movnti %%edx, 20(%3)\n"
-              "10:     movl 24(%4), %%eax\n"
-              "51:     movl 28(%4), %%edx\n"
-              "        movnti %%eax, 24(%3)\n"
-              "        movnti %%edx, 28(%3)\n"
-              "11:     movl 32(%4), %%eax\n"
-              "61:     movl 36(%4), %%edx\n"
-              "        movnti %%eax, 32(%3)\n"
-              "        movnti %%edx, 36(%3)\n"
-              "12:     movl 40(%4), %%eax\n"
-              "71:     movl 44(%4), %%edx\n"
-              "        movnti %%eax, 40(%3)\n"
-              "        movnti %%edx, 44(%3)\n"
-              "13:     movl 48(%4), %%eax\n"
-              "81:     movl 52(%4), %%edx\n"
-              "        movnti %%eax, 48(%3)\n"
-              "        movnti %%edx, 52(%3)\n"
-              "14:     movl 56(%4), %%eax\n"
-              "91:     movl 60(%4), %%edx\n"
-              "        movnti %%eax, 56(%3)\n"
-              "        movnti %%edx, 60(%3)\n"
-              "        addl $-64, %0\n"
-              "        addl $64, %4\n"
-              "        addl $64, %3\n"
-              "        cmpl $63, %0\n"
-              "        ja  0b\n"
-              "        sfence \n"
-              "5:      movl  %0, %%eax\n"
-              "        shrl  $2, %0\n"
-              "        andl $3, %%eax\n"
-              "        cld\n"
-              "6:      rep; movsl\n"
-              "        movl %%eax,%0\n"
-              "7:      rep; movsb\n"
-              "8:\n"
-              ".section .fixup,\"ax\"\n"
-              "9:      lea 0(%%eax,%0,4),%0\n"
-              "16:     pushl %0\n"
-              "        pushl %%eax\n"
-              "        xorl %%eax,%%eax\n"
-              "        rep; stosb\n"
-              "        popl %%eax\n"
-              "        popl %0\n"
-              "        jmp 8b\n"
-              ".previous\n"
-              _ASM_EXTABLE(0b,16b)
-              _ASM_EXTABLE(1b,16b)
-              _ASM_EXTABLE(2b,16b)
-              _ASM_EXTABLE(21b,16b)
-              _ASM_EXTABLE(3b,16b)
-              _ASM_EXTABLE(31b,16b)
-              _ASM_EXTABLE(4b,16b)
-              _ASM_EXTABLE(41b,16b)
-              _ASM_EXTABLE(10b,16b)
-              _ASM_EXTABLE(51b,16b)
-              _ASM_EXTABLE(11b,16b)
-              _ASM_EXTABLE(61b,16b)
-              _ASM_EXTABLE(12b,16b)
-              _ASM_EXTABLE(71b,16b)
-              _ASM_EXTABLE(13b,16b)
-              _ASM_EXTABLE(81b,16b)
-              _ASM_EXTABLE(14b,16b)
-              _ASM_EXTABLE(91b,16b)
-              _ASM_EXTABLE(6b,9b)
-              _ASM_EXTABLE(7b,16b)
-              : "=&c"(size), "=&D" (d0), "=&S" (d1)
-              :  "1"(to), "2"(from), "0"(size)
-              : "eax", "edx", "memory");
-       return size;
-}
-
 static unsigned long __copy_user_intel_nocache(void *to,
                                const void __user *from, unsigned long size)
 {
@@ -490,8 +391,6 @@ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
                                        unsigned long size);
 unsigned long __copy_user_intel(void __user *to, const void *from,
                                        unsigned long size);
-unsigned long __copy_user_zeroing_intel_nocache(void *to,
-                               const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy.  */
@@ -607,23 +506,6 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 
-unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
-                                       unsigned long n)
-{
-       stac();
-#ifdef CONFIG_X86_INTEL_USERCOPY
-       if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
-               n = __copy_user_zeroing_intel_nocache(to, from, n);
-       else
-               __copy_user_zeroing(to, from, n);
-#else
-       __copy_user_zeroing(to, from, n);
-#endif
-       clac();
-       return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll_nocache);
-
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
                                        unsigned long n)
 {
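
The trailing context stops at the opening brace of __copy_from_user_ll_nocache_nozero(). For reference, its body at the time was roughly the following, which is why the plain __copy_user_intel_nocache() survives while the zeroing variant is deleted (a sketch, not part of this diff):

	unsigned long __copy_from_user_ll_nocache_nozero(void *to,
				const void __user *from, unsigned long n)
	{
		stac();
	#ifdef CONFIG_X86_INTEL_USERCOPY
		/* the non-temporal path only pays off for larger copies */
		if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
			n = __copy_user_intel_nocache(to, from, n);
		else
			__copy_user(to, from, n);
	#else
		__copy_user(to, from, n);
	#endif
		clac();
		return n;
	}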
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5f76bc995d968a95c731b6fac7dcb48fafffca9f..7fc2104b88bc7731d43e2b6ba8e7d644fff6aea6 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -261,12 +261,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
        return __copy_from_user_inatomic(to, from, n);
 }
 
-static inline unsigned long __copy_from_user_nocache(void *to,
-                               const void __user *from, unsigned long n)
-{
-       return __copy_from_user(to, from, n);
-}
-
 #endif         /* ARCH_HAS_NOCACHE_UACCESS */
 
 /*
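
In the generic !ARCH_HAS_NOCACHE_UACCESS fallback both nocache helpers were plain aliases for the cached copies, and only the inatomic alias (visible at the top of the hunk) still has users. An architecture with real non-temporal user copies opts out of these stubs by defining the guard macro in its own uaccess header; on x86 that is (shown for illustration, not part of this diff):

	#define ARCH_HAS_NOCACHE_UACCESS 1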
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 97db876c6862578d1aca5036fa4be2c602f83d5f..672c32f9f960c1c37963779b7785fc2b37794040 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
                return 0;
        }
        iterate_and_advance(i, bytes, v,
-               __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+               __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
-               if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+               if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                             v.iov_base, v.iov_len))
                        return false;
                0;}),
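
After this commit, copy_from_iter_nocache() and copy_from_iter_full_nocache() are the only users of the inatomic nocache primitive. A hedged usage sketch for a consumer such as a persistent-memory write path (the function name and context are hypothetical):

	/* copy user data into a pmem buffer, bypassing the CPU cache;
	 * returns the number of bytes actually copied */
	static size_t pmem_copy_from_iter(void *dst, size_t bytes,
					  struct iov_iter *i)
	{
		return copy_from_iter_nocache(dst, bytes, i);
	}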