x86/uaccess: Use __uaccess_begin_nospec() and uaccess_try_nospec
author     Dan Williams <dan.j.williams@intel.com>
           Tue, 30 Jan 2018 01:02:49 +0000 (17:02 -0800)
committer  Ben Hutchings <ben@decadent.org.uk>
           Mon, 19 Mar 2018 18:59:17 +0000 (18:59 +0000)
commit 304ec1b050310548db33063e567123fae8fd0301 upstream.

Quoting Linus:

    I do think that it would be a good idea to very expressly document
    the fact that it's not that the user access itself is unsafe. I do
    agree that things like "get_user()" want to be protected, but not
    because of any direct bugs or problems with get_user() and friends,
    but simply because get_user() is an excellent source of a pointer
    that is obviously controlled from a potentially attacking user
    space. So it's a prime candidate for then finding _subsequent_
    accesses that can then be used to perturb the cache.
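
To make that concern concrete, here is a hypothetical sketch (not code from this patch; example_ioctl(), table and TABLE_SIZE are invented names) of the kind of gadget being described: a value obtained with get_user() later feeds a bounds-checked access, and the check can be bypassed under speculation.

    /*
     * Hypothetical illustration only.  The index comes straight from
     * user space via get_user(); the bounds check below can be
     * bypassed speculatively, so the dependent load leaves a cache
     * footprint chosen by the attacker.
     */
    long example_ioctl(unsigned long __user *uarg)
    {
            unsigned long idx;

            if (get_user(idx, uarg))        /* idx is attacker controlled */
                    return -EFAULT;

            if (idx < TABLE_SIZE)           /* may be speculated past */
                    return table[idx];      /* subsequent access perturbs the cache */

            return -EINVAL;
    }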

__uaccess_begin_nospec() covers __get_user() and copy_from_iter() where the
limit check is far away from the user pointer de-reference. In those cases
a barrier_nospec() prevents speculation with a potential pointer to
privileged memory. uaccess_try_nospec covers get_user_try.
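
For reference, a rough sketch of what the new helper expands to in mainline (the 3.16 backport may differ in detail): __uaccess_begin_nospec() is __uaccess_begin() plus barrier_nospec(), i.e. STAC followed by a speculation barrier, so a user-supplied pointer cannot be dereferenced speculatively before the preceding limit check has resolved.

    /*
     * Rough sketch of the mainline helpers this patch relies on; the
     * exact 3.16 definitions may differ.  barrier_nospec() expands to
     * an LFENCE (via the alternatives mechanism) on CPUs that need it.
     */
    #define __uaccess_begin()               stac()
    #define __uaccess_begin_nospec()        \
    ({                                      \
            stac();                         \
            barrier_nospec();               \
    })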

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Suggested-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Kees Cook <keescook@chromium.org>
Cc: kernel-hardening@lists.openwall.com
Cc: gregkh@linuxfoundation.org
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: alan@linux.intel.com
Link: https://lkml.kernel.org/r/151727416953.33451.10508284228526170604.stgit@dwillia2-desk3.amr.corp.intel.com
[bwh: Backported to 3.16:
 - Convert several more functions, which are just wrappers in mainline, to
   use __uaccess_begin_nospec()
 - There's no 'case 8' in __copy_to_user_inatomic()
 - Adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
arch/x86/lib/usercopy_32.c

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 7aa8a9a48c9004cda53f1c17a65b066416fad1e3..16eff1e17e963356623a9e991f8f6de2fe72bac3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -433,7 +433,7 @@ do {                                                                        \
 ({                                                                     \
        int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
-       __uaccess_begin();                                              \
+       __uaccess_begin_nospec();                                       \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
@@ -541,7 +541,7 @@ struct __large_struct { unsigned long buf[100]; };
  *     get_user_ex(...);
  * } get_user_catch(err)
  */
-#define get_user_try           uaccess_try
+#define get_user_try           uaccess_try_nospec
 #define get_user_catch(err)    uaccess_catch(err)
 
 #define get_user_ex(x, ptr)    do {                                    \
@@ -576,7 +576,7 @@ extern void __cmpxchg_wrong_size(void)
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
-       __uaccess_begin();                                              \
+       __uaccess_begin_nospec();                                       \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index b25d109fb95a44a51d088cf352d78f4d960cf920..c803818cedfb6ddb3c018cda56f6ee2c6843d819 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,19 +48,19 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 
                switch (n) {
                case 1:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __put_user_size(*(u8 *)from, (u8 __user *)to,
                                        1, ret, 1);
                        __uaccess_end();
                        return ret;
                case 2:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __put_user_size(*(u16 *)from, (u16 __user *)to,
                                        2, ret, 2);
                        __uaccess_end();
                        return ret;
                case 4:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __put_user_size(*(u32 *)from, (u32 __user *)to,
                                        4, ret, 4);
                        __uaccess_end();
@@ -104,17 +104,17 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
                switch (n) {
                case 1:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        __uaccess_end();
                        return ret;
                case 2:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        __uaccess_end();
                        return ret;
                case 4:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        __uaccess_end();
                        return ret;
@@ -154,17 +154,17 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 
                switch (n) {
                case 1:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        __uaccess_end();
                        return ret;
                case 2:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        __uaccess_end();
                        return ret;
                case 4:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        __uaccess_end();
                        return ret;
@@ -182,17 +182,17 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
                switch (n) {
                case 1:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        __uaccess_end();
                        return ret;
                case 2:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        __uaccess_end();
                        return ret;
                case 4:
-                       __uaccess_begin();
+                       __uaccess_begin_nospec();
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        __uaccess_end();
                        return ret;
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 62727aa9696c026b7b07c8155a0799dfe2da8dd9..6415eb20cf9741b1201c3a49869cc074fc51423a 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -57,31 +57,31 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                __uaccess_end();
                return ret;
        case 2:
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                __uaccess_end();
                return ret;
        case 4:
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                __uaccess_end();
                return ret;
        case 8:
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                __uaccess_end();
                return ret;
        case 10:
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (likely(!ret))
@@ -91,7 +91,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
                __uaccess_end();
                return ret;
        case 16:
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (likely(!ret))
@@ -190,7 +190,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
        switch (size) {
        case 1: {
                u8 tmp;
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
@@ -201,7 +201,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
        }
        case 2: {
                u16 tmp;
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
@@ -213,7 +213,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 
        case 4: {
                u32 tmp;
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
@@ -224,7 +224,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
        }
        case 8: {
                u64 tmp;
-               __uaccess_begin();
+               __uaccess_begin_nospec();
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 7f6e7dd46215757ce1d34e1031345f7035c2872d..de848bc95a9f6e4e18bdc9e6c90952576af74fc1 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -570,7 +570,7 @@ do {                                                                        \
 unsigned long __copy_to_user_ll(void __user *to, const void *from,
                                unsigned long n)
 {
-       __uaccess_begin();
+       __uaccess_begin_nospec();
        if (movsl_is_ok(to, from, n))
                __copy_user(to, from, n);
        else
@@ -583,7 +583,7 @@ EXPORT_SYMBOL(__copy_to_user_ll);
 unsigned long __copy_from_user_ll(void *to, const void __user *from,
                                        unsigned long n)
 {
-       __uaccess_begin();
+       __uaccess_begin_nospec();
        if (movsl_is_ok(to, from, n))
                __copy_user_zeroing(to, from, n);
        else
@@ -596,7 +596,7 @@ EXPORT_SYMBOL(__copy_from_user_ll);
 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
                                         unsigned long n)
 {
-       __uaccess_begin();
+       __uaccess_begin_nospec();
        if (movsl_is_ok(to, from, n))
                __copy_user(to, from, n);
        else
@@ -610,7 +610,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
                                        unsigned long n)
 {
-       __uaccess_begin();
+       __uaccess_begin_nospec();
 #ifdef CONFIG_X86_INTEL_USERCOPY
        if (n > 64 && cpu_has_xmm2)
                n = __copy_user_zeroing_intel_nocache(to, from, n);
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache);
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
                                        unsigned long n)
 {
-       __uaccess_begin();
+       __uaccess_begin_nospec();
 #ifdef CONFIG_X86_INTEL_USERCOPY
        if (n > 64 && cpu_has_xmm2)
                n = __copy_user_intel_nocache(to, from, n);