* both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
-               if (current->thread.regs->msr & MSR_FP)
+               if (t->regs->msr & MSR_FP)
                        giveup_fpu(current);
-               vcpu->arch.fp = t->fp_state;
+               t->fp_save_area = NULL;
        }
 
 #ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
-               vcpu->arch.vr = t->vr_state;
+               t->vr_save_area = NULL;
        }
 #endif
 
 #endif
 
        if (msr & MSR_FP) {
-               t->fp_state = vcpu->arch.fp;
-               t->fpexc_mode = 0;
                enable_kernel_fp();
-               load_fp_state(&t->fp_state);
+               load_fp_state(&vcpu->arch.fp);
+               t->fp_save_area = &vcpu->arch.fp;
        }
 
        if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
-               t->vr_state = vcpu->arch.vr;
-               t->vrsave = -1;
                enable_kernel_altivec();
-               load_vr_state(&t->vr_state);
+               load_vr_state(&vcpu->arch.vr);
+               t->vr_save_area = &vcpu->arch.vr;
 #endif
        }
 
-       current->thread.regs->msr |= msr;
+       t->regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);
 
 
        if (lost_ext & MSR_FP) {
                enable_kernel_fp();
-               load_fp_state(&current->thread.fp_state);
+               load_fp_state(&vcpu->arch.fp);
        }
 #ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC) {
                enable_kernel_altivec();
-               load_vr_state(&current->thread.vr_state);
+               load_vr_state(&vcpu->arch.vr);
        }
 #endif
        current->thread.regs->msr |= lost_ext;
 static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int ret;
-       struct thread_fp_state fp;
-       int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-       struct thread_vr_state vr;
        unsigned long uninitialized_var(vrsave);
-       int used_vr;
 #endif
-#ifdef CONFIG_VSX
-       int used_vsr;
-#endif
-       ulong ext_msr;
 
        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
                goto out;
        }
 
-       /* Save FPU state in stack */
+       /* Save FPU state in thread_struct */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
-       fp = current->thread.fp_state;
-       fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
-       /* Save Altivec state in stack */
-       used_vr = current->thread.used_vr;
-       if (used_vr) {
-               if (current->thread.regs->msr & MSR_VEC)
-                       giveup_altivec(current);
-               vr = current->thread.vr_state;
-               vrsave = current->thread.vrsave;
-       }
+       /* Save Altivec state in thread_struct */
+       if (current->thread.regs->msr & MSR_VEC)
+               giveup_altivec(current);
 #endif
 
 #ifdef CONFIG_VSX
-       /* Save VSX state in stack */
-       used_vsr = current->thread.used_vsr;
-       if (used_vsr && (current->thread.regs->msr & MSR_VSX))
+       /* Save VSX state in thread_struct */
+       if (current->thread.regs->msr & MSR_VSX)
                __giveup_vsx(current);
 #endif
 
-       /* Remember the MSR with disabled extensions */
-       ext_msr = current->thread.regs->msr;
-
        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
 
-       current->thread.regs->msr = ext_msr;
-
-       /* Restore FPU/VSX state from stack */
-       current->thread.fp_state = fp;
-       current->thread.fpexc_mode = fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
-       /* Restore Altivec state from stack */
-       if (used_vr && current->thread.used_vr) {
-               current->thread.vr_state = vr;
-               current->thread.vrsave = vrsave;
-       }
-       current->thread.used_vr = used_vr;
-#endif
-
-#ifdef CONFIG_VSX
-       current->thread.used_vsr = used_vsr;
-#endif
-
 out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
 
 {
        int ret, s;
        struct thread_struct thread;
-#ifdef CONFIG_PPC_FPU
-       struct thread_fp_state fp;
-       int fpexc_mode;
-#endif
 
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 #ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();
-       fp = current->thread.fp_state;
-       fpexc_mode = current->thread.fpexc_mode;
-
-       /* Restore guest FPU state to thread */
-       current->thread.fp_state = vcpu->arch.fp;
 
        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
        kvmppc_save_guest_fp(vcpu);
 
        vcpu->fpu_active = 0;
-
-       /* Save guest FPU state from thread */
-       vcpu->arch.fp = current->thread.fp_state;
-
-       /* Restore userspace FPU state from stack */
-       current->thread.fp_state = fp;
-       current->thread.fpexc_mode = fpexc_mode;
 #endif
 
 out: