/*
 * NOTE(review): diff hunk — '-' lines are removed by this patch.
 * The task back-pointer and cpu field leave thread_info (per the
 * asm-offsets hunk below, thread_info is now embedded in task_struct),
 * leaving only flags, addr_limit and preempt_count here.
 */
struct thread_info {
        unsigned long           flags;          /* low level flags */
        mm_segment_t            addr_limit;     /* address limit */
-       struct task_struct      *task;          /* main task structure */
        int                     preempt_count;  /* 0 => preemptable, <0 => bug */
-       int                     cpu;            /* cpu */
 };
 
 /*
  * NOTE(review): static initializer for the boot task's thread_info.
  * '.task' goes away together with the struct field; '.flags = 0' is
  * dropped as redundant — static storage is zero-initialized anyway,
  * so behavior is unchanged.
  */
 #define INIT_THREAD_INFO(tsk)                                          \
 {                                                                      \
-       .task           = &tsk,                                         \
-       .flags          = 0,                                            \
        .preempt_count  = INIT_PREEMPT_COUNT,                           \
        .addr_limit     = KERNEL_DS,                                    \
 }
 
 /* The initial task's kernel stack, taken from init_thread_union. */
 #define init_stack             (init_thread_union.stack)
 
/*
 * NOTE(review): block deleted in its entirety by this patch.  The
 * sp_el0-based current_thread_info() leaves this header — presumably
 * sp_el0 now caches the current task_struct pointer rather than a
 * thread_info living on the stack (see the cpu_switch_to hunk) — TODO
 * confirm where the replacement definition lands in the full series.
 */
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-/*
- * struct thread_info can be accessed directly via sp_el0.
- *
- * We don't use read_sysreg() as we want the compiler to cache the value where
- * possible.
- */
-static inline struct thread_info *current_thread_info(void)
-{
-       unsigned long sp_el0;
-
-       asm ("mrs %0, sp_el0" : "=r" (sp_el0));
-
-       return (struct thread_info *)sp_el0;
-}
-
 /* PC a (non-running) task saved at its last context switch. */
 #define thread_saved_pc(tsk)   \
        ((unsigned long)(tsk->thread.cpu_context.pc))
 #define thread_saved_sp(tsk)   \
 
{
  DEFINE(TSK_ACTIVE_MM,                offsetof(struct task_struct, active_mm));
  BLANK();
  /*
   * NOTE(review): thread_info-relative TI_* offsets are replaced by
   * task_struct-relative TSK_TI_* offsets — thread_info is now embedded
   * in task_struct (thread_info.flags etc.).  TSK_STACK lets assembly
   * locate the task's stack base from the task pointer.
   */
-  DEFINE(TI_FLAGS,             offsetof(struct thread_info, flags));
-  DEFINE(TI_PREEMPT,           offsetof(struct thread_info, preempt_count));
-  DEFINE(TI_ADDR_LIMIT,                offsetof(struct thread_info, addr_limit));
+  DEFINE(TSK_TI_FLAGS,         offsetof(struct task_struct, thread_info.flags));
+  DEFINE(TSK_TI_PREEMPT,       offsetof(struct task_struct, thread_info.preempt_count));
+  DEFINE(TSK_TI_ADDR_LIMIT,    offsetof(struct task_struct, thread_info.addr_limit));
+  DEFINE(TSK_STACK,            offsetof(struct task_struct, stack));
  BLANK();
  DEFINE(THREAD_CPU_CONTEXT,   offsetof(struct task_struct, thread.cpu_context));
  BLANK();
  DEFINE(TZ_DSTTIME,           offsetof(struct timezone, tz_dsttime));
  BLANK();
  DEFINE(CPU_BOOT_STACK,       offsetof(struct secondary_data, stack));
  /* NOTE(review): secondary CPUs now also receive a task pointer at boot. */
+  DEFINE(CPU_BOOT_TASK,                offsetof(struct secondary_data, task));
  BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
  DEFINE(VCPU_CONTEXT,         offsetof(struct kvm_vcpu, arch.ctxt));
 
 
       // NOTE(review): kernel_entry / kernel_exit fragments (the .macro
       // heads are outside this view).  On entry from EL0, 'tsk' can no
       // longer be derived by masking sp — thread_info is off the stack —
       // so it is loaded from the per-cpu __entry_task variable instead.
       // All thread_info field accesses switch to the TSK_TI_* offsets
       // (task_struct-relative) defined in asm-offsets.
       .if     \el == 0
       mrs     x21, sp_el0
-       mov     tsk, sp
-       and     tsk, tsk, #~(THREAD_SIZE - 1)   // Ensure MDSCR_EL1.SS is clear,
-       ldr     x19, [tsk, #TI_FLAGS]           // since we can unmask debug
+       ldr_this_cpu    tsk, __entry_task, x20  // Ensure MDSCR_EL1.SS is clear,
+       ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
       disable_step_tsk x19, x20               // exceptions when scheduling.
 
       mov     x29, xzr                        // fp pointed to user-space
       add     x21, sp, #S_FRAME_SIZE
       get_thread_info tsk
       /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
-       ldr     x20, [tsk, #TI_ADDR_LIMIT]
+       ldr     x20, [tsk, #TSK_TI_ADDR_LIMIT]
       str     x20, [sp, #S_ORIG_ADDR_LIMIT]
       mov     x20, #TASK_SIZE_64
-       str     x20, [tsk, #TI_ADDR_LIMIT]
+       str     x20, [tsk, #TSK_TI_ADDR_LIMIT]
       /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
       .endif /* \el == 0 */
       mrs     x22, elr_el1
       // NOTE(review): below is a kernel_exit fragment (addr_limit restore
       // on return to EL1); the hunks appear stitched together here.
       .if     \el != 0
       /* Restore the task's original addr_limit. */
       ldr     x20, [sp, #S_ORIG_ADDR_LIMIT]
-       str     x20, [tsk, #TI_ADDR_LIMIT]
+       str     x20, [tsk, #TSK_TI_ADDR_LIMIT]
 
       /* No need to restore UAO, it will be restored from SPSR_EL1 */
       .endif
       mov     x19, sp                 // preserve the original sp
 
       /*
-        * Compare sp with the current thread_info, if the top
-        * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
-        * should switch to the irq stack.
+        * Compare sp with the base of the task stack.
+        * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+        * and should switch to the irq stack.
        */
       // NOTE(review): XOR of sp with the task's stack base, then masking
       // off the low (THREAD_SIZE - 1) bits, is zero iff both addresses
       // lie in the same THREAD_SIZE-aligned region — i.e. we are on the
       // task stack.  This replaces the old mask-and-compare against tsk,
       // which no longer works since tsk is a task_struct pointer, not a
       // stack-resident thread_info.
-       and     x25, x19, #~(THREAD_SIZE - 1)
-       cmp     x25, tsk
-       b.ne    9998f
+       ldr     x25, [tsk, TSK_STACK]
+       eor     x25, x25, x19
+       and     x25, x25, #~(THREAD_SIZE - 1)
+       cbnz    x25, 9998f
 
       adr_this_cpu x25, irq_stack, x26
       mov     x26, #IRQ_STACK_START_SP
       irq_handler
 
 // NOTE(review): kernel-preemption check on IRQ exit from EL1 — only
 // reschedule when preempt_count is zero AND TIF_NEED_RESCHED is set.
 // The loads switch to task_struct-relative TSK_TI_* offsets.
 #ifdef CONFIG_PREEMPT
-       ldr     w24, [tsk, #TI_PREEMPT]         // get preempt count
+       ldr     w24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
       cbnz    w24, 1f                         // preempt count != 0
-       ldr     x0, [tsk, #TI_FLAGS]            // get flags
+       ldr     x0, [tsk, #TSK_TI_FLAGS]        // get flags
       tbz     x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
       bl      el1_preempt
1:
 // NOTE(review): el1_preempt — loops calling preempt_schedule_irq()
 // (which handles irq enable/disable internally) while TIF_NEED_RESCHED
 // remains set.  lr is stashed in x24 because 'bl' clobbers it; the
 // flags re-read uses the new task_struct-relative offset.
el1_preempt:
       mov     x24, lr
1:     bl      preempt_schedule_irq            // irq en/disable is done inside
-       ldr     x0, [tsk, #TI_FLAGS]            // get new tasks TI_FLAGS
+       ldr     x0, [tsk, #TSK_TI_FLAGS]        // get new tasks TI_FLAGS
       tbnz    x0, #TIF_NEED_RESCHED, 1b       // needs rescheduling?
       ret     x24
 #endif
       // NOTE(review): tail of cpu_switch_to (function head not visible
       // here).  Previously sp_el0 was set to the new thread_info found by
       // masking the new sp; now sp_el0 caches the task pointer directly —
       // x1 is presumably cpu_switch_to's 'next' task_struct argument,
       // still live at this point — TODO confirm against the full routine.
       ldp     x29, x9, [x8], #16
       ldr     lr, [x8]
       mov     sp, x9
-       and     x9, x9, #~(THREAD_SIZE - 1)
-       msr     sp_el0, x9
+       msr     sp_el0, x1
       ret
ENDPROC(cpu_switch_to)
 
 // NOTE(review): fast syscall-return path fragments (there appears to be
 // a gap between the two hunks below).  Both TIF flag re-checks switch
 // to the task_struct-relative TSK_TI_FLAGS offset.
ret_fast_syscall:
       disable_irq                             // disable interrupts
       str     x0, [sp, #S_X0]                 // returned x0
-       ldr     x1, [tsk, #TI_FLAGS]            // re-check for syscall tracing
+       ldr     x1, [tsk, #TSK_TI_FLAGS]        // re-check for syscall tracing
       and     x2, x1, #_TIF_SYSCALL_WORK
       cbnz    x2, ret_fast_syscall_trace
       and     x2, x1, #_TIF_WORK_MASK
 #ifdef CONFIG_TRACE_IRQFLAGS
       bl      trace_hardirqs_on               // enabled while in userspace
 #endif
-       ldr     x1, [tsk, #TI_FLAGS]            // re-check for single-step
+       ldr     x1, [tsk, #TSK_TI_FLAGS]        // re-check for single-step
       b       finish_ret_to_user
/*
 * "slow" syscall return path.
 */
 // NOTE(review): slow return-to-user path plus a syscall-entry fragment
 // (the cmp/scno lines at the bottom belong to el0_svc — hunks are
 // stitched here).  Flag loads move to TSK_TI_FLAGS, consistent with
 // the asm-offsets change.
ret_to_user:
       disable_irq                             // disable interrupts
-       ldr     x1, [tsk, #TI_FLAGS]
+       ldr     x1, [tsk, #TSK_TI_FLAGS]
       and     x2, x1, #_TIF_WORK_MASK
       cbnz    x2, work_pending
finish_ret_to_user:
       enable_dbg_and_irq
       ct_user_exit 1
 
-       ldr     x16, [tsk, #TI_FLAGS]           // check for syscall hooks
+       ldr     x16, [tsk, #TSK_TI_FLAGS]       // check for syscall hooks
       tst     x16, #_TIF_SYSCALL_WORK
       b.ne    __sys_trace
       cmp     scno, sc_nr                     // check upper syscall limit