x86/uaccess: Avoid barrier_nospec() in 64-bit copy_from_user()
Author:     Linus Torvalds <torvalds@linux-foundation.org>
AuthorDate: Wed, 30 Oct 2024 02:03:31 +0000 (16:03 -1000)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Wed, 30 Oct 2024 21:38:10 +0000 (11:38 -1000)
The barrier_nospec() in 64-bit copy_from_user() is slow. Instead, use
pointer masking to force the user pointer to all 1's for an invalid
address.
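
To see why masking works as a Spectre mitigation where access_ok()
alone does not: the fixup is data-dependent arithmetic rather than a
conditional branch, so there is nothing for the CPU to mis-predict and
no speculation window in which a bad pointer could be dereferenced.
A minimal sketch of the idea (hypothetical helper name; the real
x86-64 mask_user_address() differs in detail), assuming a layout
where valid user addresses have bit 63 clear:

	/*
	 * Arithmetic right shift smears the sign bit: addr >> 63 is 0
	 * for a user pointer and -1 (all 1's) for anything else.
	 * OR-ing it back in leaves user pointers untouched and turns
	 * bad ones into ~0, an address on which the copy is
	 * guaranteed to fault.
	 */
	static inline const void *mask_user_ptr_sketch(const void *ptr)
	{
		long addr = (long)ptr;

		return (const void *)(addr | (addr >> 63));
	}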

The kernel test robot reports a 2.6% improvement in the per_thread_ops
benchmark [1].

This is a variation on a patch originally by Josh Poimboeuf [2].

Link: https://lore.kernel.org/202410281344.d02c72a2-oliver.sang@intel.com [1]
Link: https://lore.kernel.org/5b887fe4c580214900e21f6c61095adf9a142735.1730166635.git.jpoimboe@kernel.org [2]
Tested-and-reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 39c7cf82b0c22189517f9b10cd528fea121340b2..43844510d5d0facfcb08c7d32798fe7e0fdd804c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -38,6 +38,7 @@
 #else
  #define can_do_masked_user_access() 0
  #define masked_user_access_begin(src) NULL
+ #define mask_user_address(src) (src)
 #endif
 
 /*
@@ -159,19 +160,27 @@ _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        unsigned long res = n;
        might_fault();
-       if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+       if (should_fail_usercopy())
+               goto fail;
+       if (can_do_masked_user_access())
+               from = mask_user_address(from);
+       else {
+               if (!access_ok(from, n))
+                       goto fail;
                /*
                 * Ensure that bad access_ok() speculation will not
                 * lead to nasty side effects *after* the copy is
                 * finished:
                 */
                barrier_nospec();
-               instrument_copy_from_user_before(to, from, n);
-               res = raw_copy_from_user(to, from, n);
-               instrument_copy_from_user_after(to, from, n, res);
        }
-       if (unlikely(res))
-               memset(to + (n - res), 0, res);
+       instrument_copy_from_user_before(to, from, n);
+       res = raw_copy_from_user(to, from, n);
+       instrument_copy_from_user_after(to, from, n, res);
+       if (likely(!res))
+               return 0;
+fail:
+       memset(to + (n - res), 0, res);
        return res;
 }
 extern __must_check unsigned long
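
Putting the hunk together, the rewritten helper reads as below (the
function body is exactly what the diff produces; the "static inline
unsigned long" declaration line is an assumption, reconstructed from
context). Note that both failure paths converge on the same memset():
should_fail_usercopy() and a failed access_ok() jump to "fail" with
res == n, zeroing the whole buffer, while a faulting masked copy
leaves res as the number of uncopied bytes, zeroing only the tail.
Either way the zeroing semantics match the pre-patch behavior.

	static inline unsigned long
	_inline_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;
		might_fault();
		if (should_fail_usercopy())
			goto fail;
		if (can_do_masked_user_access())
			from = mask_user_address(from);
		else {
			if (!access_ok(from, n))
				goto fail;
			/*
			 * Ensure that bad access_ok() speculation will not
			 * lead to nasty side effects *after* the copy is
			 * finished:
			 */
			barrier_nospec();
		}
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
		if (likely(!res))
			return 0;
	fail:
		memset(to + (n - res), 0, res);
		return res;
	}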