#ifdef CONFIG_VSX
 extern void enable_kernel_vsx(void);
 extern void flush_vsx_to_thread(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void __giveup_vsx(struct task_struct *);
 static inline void disable_kernel_vsx(void)
 {
        msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-#else
-static inline void __giveup_vsx(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_SPE
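
With giveup_vsx() and __giveup_vsx() dropped from switch_to.h, the header
now exposes only enable_kernel_vsx(), disable_kernel_vsx() and
flush_vsx_to_thread(), so code outside process.c can no longer manipulate
VSX ownership directly. A minimal sketch of how a kernel-side user is
expected to bracket its VSX work, assuming a hypothetical vsx_do_copy()
worker:

/* Sketch only: vsx_do_copy() is hypothetical, not part of this patch. */
static void kernel_vsx_user(void *dst, const void *src, size_t len)
{
	preempt_disable();
	enable_kernel_vsx();
	vsx_do_copy(dst, src, len);	/* runs with MSR_VSX enabled */
	disable_kernel_vsx();
	preempt_enable();
}

The remaining C hunks below are against the implementation in
arch/powerpc/kernel/process.c.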
 
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-void giveup_vsx(struct task_struct *tsk)
+static void __giveup_vsx(struct task_struct *tsk)
 {
-       check_if_tm_restore_required(tsk);
-
-       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
        if (tsk->thread.regs->msr & MSR_FP)
                __giveup_fpu(tsk);
        if (tsk->thread.regs->msr & MSR_VEC)
                __giveup_altivec(tsk);
+       tsk->thread.regs->msr &= ~MSR_VSX;
+}
+
+static void giveup_vsx(struct task_struct *tsk)
+{
+       check_if_tm_restore_required(tsk);
+
+       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
        __giveup_vsx(tsk);
        msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-EXPORT_SYMBOL(giveup_vsx);
+
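
The old exported giveup_vsx() is split in two: __giveup_vsx() does the
bookkeeping and assumes the caller already has MSR_FP/MSR_VEC/MSR_VSX
enabled in the MSR, while giveup_vsx() wraps it with the TM-restore check
and the msr_check_and_set()/msr_check_and_clear() pair. __giveup_vsx()
also picks up the job of clearing MSR_VSX from the task's saved MSR,
which the assembly removed at the end of this patch used to do. Callers
such as flush_vsx_to_thread() are unchanged; paraphrased here for
context, not part of the patch:

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
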
+static void save_vsx(struct task_struct *tsk)
+{
+       if (tsk->thread.regs->msr & MSR_FP)
+               save_fpu(tsk);
+       if (tsk->thread.regs->msr & MSR_VEC)
+               save_altivec(tsk);
+}
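
save_vsx() is new: it saves the whole VSX state without touching any MSR
bits, so the task keeps the facility the next time it runs. It gets away
with only save_fpu() plus save_altivec() because of how the VSX register
file aliases the older register sets on VSX-capable CPUs; illustration
only, not part of the patch:

/*
 * VSR[ 0..31]: doubleword 0 of each is FPR[0..31], so save_fpu()
 *              (which stores full VSRs when the CPU has VSX) covers them.
 * VSR[32..63]: these are the Altivec VR[0..31], covered by save_altivec().
 */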
 
 void enable_kernel_vsx(void)
 {
[...]
 }
 #else
 static inline int restore_vsx(struct task_struct *tsk) { return 0; }
+static inline void save_vsx(struct task_struct *tsk) { }
 #endif /* CONFIG_VSX */
 
#ifdef CONFIG_SPE

[...]
        msr_check_and_set(msr_all_available);
 
-       if (usermsr & MSR_FP)
-               save_fpu(tsk);
-
-       if (usermsr & MSR_VEC)
-               save_altivec(tsk);
+       /*
+        * Given the way the register space is laid out in hardware (the
+        * VSRs alias the FP and Altivec registers), save_vsx() boils down
+        * to a save_fpu() plus a save_altivec().
+        */
+       if (usermsr & MSR_VSX) {
+               save_vsx(tsk);
+       } else {
+               if (usermsr & MSR_FP)
+                       save_fpu(tsk);
 
-       if (usermsr & MSR_VSX)
-               __giveup_vsx(tsk);
+               if (usermsr & MSR_VEC)
+                       save_altivec(tsk);
+       }
 
        if (usermsr & MSR_SPE)
                __giveup_spe(tsk);
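
Previously the VSX case in save_all() went through __giveup_vsx(), which
turned a plain save into a full give-up by clearing MSR_VSX behind the
task's back; with save_vsx() it is now genuinely just a save. For
context, the surrounding function, paraphrased from process.c rather
than quoted:

static void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	/* ... the facility saves shown in the hunk above ... */

	msr_check_and_clear(msr_all_available);
}

The final hunk, in arch/powerpc/kernel/vsx.S, removes the assembly
implementation of __giveup_vsx() that the C version above replaces.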
 
        std     r12,_MSR(r1)
        b       fast_exception_return
 
-/*
- * __giveup_vsx(tsk)
- * Disable VSX for the task given as the argument.
- * Does NOT save vsx registers.
- */
-_GLOBAL(__giveup_vsx)
-       addi    r3,r3,THREAD            /* want THREAD of task */
-       ld      r5,PT_REGS(r3)
-       cmpdi   0,r5,0
-       beq     1f
-       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r3,MSR_VSX@h
-       andc    r4,r4,r3                /* disable VSX for previous task */
-       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-       blr
-
 #endif /* CONFIG_VSX */
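
Note that the removed assembly never saved any registers; aside from the
NULL pt_regs check, its entire effect was to clear MSR_VSX in the task's
saved MSR, which is now the single C statement at the end of the new
__giveup_vsx():

	tsk->thread.regs->msr &= ~MSR_VSX;

Doing it in C also keeps the FP/Altivec/VSX give-up sequence in one place
instead of splitting it between process.c and vsx.S.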