err;                                                            \
 })
 
-#define check_insn(insn, output, input...)                             \
-({                                                                     \
-       int err;                                                        \
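+/*
+ * Like check_insn(), but for restoring FPU state from a kernel buffer: a
+ * fault here is fixed up by ex_handler_fprestore() via the exception table,
+ * so no error value is returned to the caller.
+ */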
+#define kernel_insn(insn, output, input...)                            \
        asm volatile("1:" #insn "\n\t"                                  \
                     "2:\n"                                             \
-                    ".section .fixup,\"ax\"\n"                         \
-                    "3:  movl $-1,%[err]\n"                            \
-                    "    jmp  2b\n"                                    \
-                    ".previous\n"                                      \
-                    _ASM_EXTABLE(1b, 3b)                               \
-                    : [err] "=r" (err), output                         \
-                    : "0"(0), input);                                  \
-       err;                                                            \
-})
+                    _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)  \
+                    : output : input)
 
 static inline int copy_fregs_to_user(struct fregs_state __user *fx)
 {
 
 static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
 {
-       int err;
-
        if (IS_ENABLED(CONFIG_X86_32)) {
-               err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+               kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        } else {
                if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
-                       err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+                       kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
                } else {
                        /* See comment in copy_fxregs_to_kernel() below. */
-                       err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
+                       kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
                }
        }
-       /* Copying from a kernel buffer to FPU registers should never fail: */
-       WARN_ON_FPU(err);
 }
 
 static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
 
 static inline void copy_kernel_to_fregs(struct fregs_state *fx)
 {
-       int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-
-       WARN_ON_FPU(err);
+       kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline int copy_user_to_fregs(struct fregs_state __user *fx)
  * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
  * XSAVE area format.
  */
-#define XSTATE_XRESTORE(st, lmask, hmask, err)                         \
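+/* A fault here is fixed up by ex_handler_fprestore(); no error is returned. */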
+#define XSTATE_XRESTORE(st, lmask, hmask)                              \
        asm volatile(ALTERNATIVE(XRSTOR,                                \
                                 XRSTORS, X86_FEATURE_XSAVES)           \
                     "\n"                                               \
-                    "xor %[err], %[err]\n"                             \
                     "3:\n"                                             \
-                    ".pushsection .fixup,\"ax\"\n"                     \
-                    "4: movl $-2, %[err]\n"                            \
-                    "jmp 3b\n"                                         \
-                    ".popsection\n"                                    \
-                    _ASM_EXTABLE(661b, 4b)                             \
-                    : [err] "=r" (err)                                 \
+                    _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
+                    :                                                  \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")
 
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
-       /* We should never fault when copying from a kernel buffer: */
+       /*
+        * We should never fault when copying from a kernel buffer, and the FPU
+        * state we set at boot time should be valid.
+        */
        WARN_ON_FPU(err);
 }
 
 {
        u32 lmask = mask;
        u32 hmask = mask >> 32;
-       int err;
-
-       XSTATE_XRESTORE(xstate, lmask, hmask, err);
 
-       /* We should never fault when copying from a kernel buffer: */
-       WARN_ON_FPU(err);
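+       /*
+        * A fault here is fixed up by ex_handler_fprestore(), which falls
+        * back to reinitializing the FPU registers from init_fpstate.
+        */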
+       XSTATE_XRESTORE(xstate, lmask, hmask);
 }
 
 /*
 
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
 
+#include <asm/fpu/internal.h>
 #include <asm/traps.h>
 #include <asm/kdebug.h>
 
 }
 EXPORT_SYMBOL_GPL(ex_handler_refcount);
 
+/*
+ * Handler for when we fail to restore a task's FPU state.  We should never get
+ * here because the FPU state of a task using the FPU (task->thread.fpu.state)
+ * should always be valid.  However, past bugs have allowed userspace to set
+ * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
+ * These caused XRSTOR to fail when switching to the task, leaking the FPU
+ * registers of the task previously executing on the CPU.  Mitigate this class
+ * of vulnerability by restoring from the initial state (essentially, zeroing
+ * out all the FPU registers) if we can't restore from the task's FPU state.
+ */
+bool ex_handler_fprestore(const struct exception_table_entry *fixup,
+                         struct pt_regs *regs, int trapnr)
+{
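+       /* Resume execution at the fixup address, just past the failed restore. */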
+       regs->ip = ex_fixup_addr(fixup);
+
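+       /* %pB looks up ip - 1, so this names the faulting restore instruction. */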
+       WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
+                 (void *)instruction_pointer(regs));
+
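+       /* Reload all FPU state (mask -1) from the known-good init_fpstate. */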
+       __copy_kernel_to_fpregs(&init_fpstate, -1);
+       return true;
+}
+EXPORT_SYMBOL_GPL(ex_handler_fprestore);
+
 bool ex_handler_ext(const struct exception_table_entry *fixup,
                   struct pt_regs *regs, int trapnr)
 {