                 * on non-ASID CPUs, the old mm will remain valid until the
                 * finish_arch_post_lock_switch() call.
                 */
-               set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+               mm->context.switch_pending = 1;
        else
                cpu_switch_mm(mm->pgd, mm);
 }
 #define finish_arch_post_lock_switch \
 	finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
 {
-       if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
-               struct mm_struct *mm = current->mm;
-               cpu_switch_mm(mm->pgd, mm);
+       struct mm_struct *mm = current->mm;
+
+       if (mm && mm->context.switch_pending) {
+               /*
+                * Preemption must be disabled during cpu_switch_mm() as we
+                * have some stateful cache flush implementations. Check
+                * switch_pending again in case we were preempted and the
+                * switch to this mm was already done.
+                */
+               preempt_disable();
+               if (mm->context.switch_pending) {
+                       mm->context.switch_pending = 0;
+                       cpu_switch_mm(mm->pgd, mm);
+               }
+               preempt_enable_no_resched();
        }
 }
 
 
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    20
-#define TIF_SWITCH_MM          22      /* deferred switch_mm */
 
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
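
For illustration only (not part of the patch): a minimal, self-contained userspace sketch of the pattern the hunk above introduces, i.e. record a per-mm switch_pending flag when the switch cannot be done immediately, then re-check that flag with preemption notionally disabled before performing the switch, so a switch already completed by a preempting context is not repeated. All names below (fake_mm, do_switch, preemption_disable, and so on) are stand-ins for this sketch, not kernel APIs.

/* Sketch only: models "defer, then re-check under preemption-disable".
 * None of these names are real kernel interfaces.
 */
#include <stdio.h>

struct fake_mm_context {
	unsigned int switch_pending;	/* models mm->context.switch_pending */
};

struct fake_mm {
	struct fake_mm_context context;
};

/* Stand-ins for preempt_disable()/preempt_enable_no_resched(). */
static void preemption_disable(void) { /* no-op in this sketch */ }
static void preemption_enable(void)  { /* no-op in this sketch */ }

/* Stand-in for cpu_switch_mm(): just report that a switch happened. */
static void do_switch(struct fake_mm *mm)
{
	printf("switching to mm %p\n", (void *)mm);
}

/* Mirrors check_and_switch_context(): defer by setting the flag when the
 * switch cannot be performed right now (IRQs disabled in the real code). */
static void check_and_switch(struct fake_mm *mm, int must_defer)
{
	if (must_defer)
		mm->context.switch_pending = 1;
	else
		do_switch(mm);
}

/* Mirrors finish_arch_post_lock_switch(): re-check the flag after disabling
 * preemption so a switch already done by a preempting context is not redone. */
static void finish_post_lock_switch(struct fake_mm *mm)
{
	if (mm && mm->context.switch_pending) {
		preemption_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			do_switch(mm);
		}
		preemption_enable();
	}
}

int main(void)
{
	struct fake_mm mm = { .context = { .switch_pending = 0 } };

	check_and_switch(&mm, 1);	/* deferred: only sets the flag */
	finish_post_lock_switch(&mm);	/* performs the switch exactly once */
	finish_post_lock_switch(&mm);	/* flag already clear: nothing to do */
	return 0;
}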