                         * Defer the switch to the current thread's TTBR0_EL1
                         * until uaccess_enable(). Restore the current
                         * thread's saved ttbr0 corresponding to its active_mm
-                        * (if different from init_mm).
                         */
                        cpu_set_reserved_ttbr0();
-                       if (current->active_mm != &init_mm)
-                               update_saved_ttbr0(current, current->active_mm);
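+                       /*
+                        * update_saved_ttbr0() points the saved ttbr0 at the
+                        * zero page for init_mm, so this is safe even when
+                        * active_mm is init_mm (e.g. for a kernel thread).
+                        */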
+                       update_saved_ttbr0(current, current->active_mm);
                }
        }
 }
 
 static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
 {
-       if (system_uses_ttbr0_pan()) {
-               BUG_ON(mm->pgd == swapper_pg_dir);
-               task_thread_info(tsk)->ttbr0 =
-                       virt_to_phys(mm->pgd) | ASID(mm) << 48;
-       }
+       u64 ttbr;
+
+       if (!system_uses_ttbr0_pan())
+               return;
+
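+       /*
+        * init_mm uses swapper_pg_dir, which must never be installed in
+        * TTBR0_EL1. Point the saved ttbr0 at the empty zero page instead,
+        * which contains no valid translations.
+        */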
+       if (mm == &init_mm)
+               ttbr = __pa_symbol(empty_zero_page);
+       else
+               ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+
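+       /* TTBR0_EL1 is switched to this saved value by uaccess_enable(). */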
+       task_thread_info(tsk)->ttbr0 = ttbr;
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
         * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
         * value may have not been initialised yet (activate_mm caller) or the
         * ASID has changed since the last run (following the context switch
-        * of another thread of the same process). Avoid setting the reserved
-        * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+        * of another thread of the same process).
         */
-       if (next != &init_mm)
-               update_saved_ttbr0(tsk, next);
+       update_saved_ttbr0(tsk, next);
 }
 
 #define deactivate_mm(tsk,mm)  do { } while (0)