#endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
+void giveup_vsx(struct task_struct *tsk)
+{
+       u64 oldmsr = mfmsr();
+       u64 newmsr;
+
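+       /* If the task is transactional, flag that TM state must be restored */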
+       check_if_tm_restore_required(tsk);
+
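+       /* Enable FP, VMX and VSX in the MSR so we can access the state */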
+       newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
+
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+
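+       /*
+        * The VSX registers overlap the FP and VMX register sets, so
+        * give up any live FP and VMX state as well.
+        */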
+       if (tsk->thread.regs->msr & MSR_FP)
+               __giveup_fpu(tsk);
+       if (tsk->thread.regs->msr & MSR_VEC)
+               __giveup_altivec(tsk);
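+       /* __giveup_vsx() just clears MSR_VSX; it saves no registers */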
+       __giveup_vsx(tsk);
+}
+EXPORT_SYMBOL(giveup_vsx);
+
 void enable_kernel_vsx(void)
 {
        WARN_ON(preemptible());
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
-void giveup_vsx(struct task_struct *tsk)
-{
-       check_if_tm_restore_required(tsk);
-       giveup_fpu(tsk);
-       giveup_altivec(tsk);
-       __giveup_vsx(tsk);
-}
-EXPORT_SYMBOL(giveup_vsx);
-
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
        if (tsk->thread.regs) {
 
         * contains valid data
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                msr |= MSR_VSX;
         * contains valid data
         */
        if (current->thread.used_vsr) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                if (msr & MSR_VSX) {
 
         * VMX data.
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                err |= copy_vsx_to_user(v_regs, current);
                /* set MSR_VSX in the MSR value in the frame to
         * VMX data.
         */
        if (current->thread.used_vsr) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                tm_v_regs += ELF_NVRREG;
 
 
  * __giveup_vsx(tsk)
  * Disable VSX for the task given as the argument.
  * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
  */
 _GLOBAL(__giveup_vsx)
-       mfmsr   r5
-       oris    r5,r5,MSR_VSX@h
-       mtmsrd  r5                      /* enable use of VSX now */
-       isync
-
        addi    r3,r3,THREAD            /* want THREAD of task */
        ld      r5,PT_REGS(r3)
        cmpdi   0,r5,0