 /*
  * Delay loading of the complete FPU state until the return to userland.
  * PKRU is handled separately.
  */
-static inline void switch_fpu_finish(struct fpu *new_fpu)
+static inline void switch_fpu_finish(void)
 {
        if (cpu_feature_enabled(X86_FEATURE_FPU))
                set_thread_flag(TIF_NEED_FPU_LOAD);
 }

--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
-       struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
        this_cpu_write(current_task, next_p);
 
-       switch_fpu_finish(next_fpu);
+       switch_fpu_finish();
 
        /* Load the Intel cache allocation PQR MSR. */
        resctrl_sched_in();

--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
-       struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(hardirq_stack_inuse));

        this_cpu_write(current_task, next_p);
        this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
-       switch_fpu_finish(next_fpu);
+       switch_fpu_finish();
 
        /* Reload sp0. */
        update_task_stack(next_p);
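
For context, a sketch of the consumer side (not part of this patch):
TIF_NEED_FPU_LOAD, set by switch_fpu_finish() above, is checked on the
exit-to-userspace path, which is where the full FPU register state is
actually restored. A minimal sketch, simplified from the kernel's
arch_exit_to_user_mode_prepare() in arch/x86/include/asm/entry-common.h
(unrelated exit work omitted):

        /*
         * switch_fpu_finish() only sets the flag; the register restore is
         * deferred to the return to userspace, so context switches that
         * stay in the kernel never pay for reloading the FPU state.
         */
        static __always_inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
                                                                   unsigned long ti_work)
        {
                /* Debug check: fpregs state must be consistent at this point. */
                fpregs_assert_state_consistent();

                if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
                        switch_fpu_return();    /* load the user FPU state now */
        }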