mm_segment_t            addr_limit;     /* thread address space */
 
        unsigned long           cpenable;
+#if XCHAL_HAVE_EXCLUSIVE
+       /* result of the most recent exclusive store */
+       unsigned long           atomctl8;
+#endif
 
        /* Allocate storage for extra user states and coprocessor states. */
 #if XTENSA_HAVE_COPROCESSORS
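For reference: atomctl8 caches the ATOMCTL8 special register, which holds the result of the most recent s32ex. It has to be per-thread state because an atomic sequence can be preempted between the s32ex and the getex that collects its result. A minimal sketch of the retry loop, modeled on the ATOMIC_OP pattern in arch/xtensa/include/asm/atomic.h (the function name and operand constraints here are illustrative):

	/* Sketch: atomic add via exclusive access.  l32ex arms the
	 * monitor, s32ex succeeds only while it stays armed, and getex
	 * moves the success flag from ATOMCTL8 into a register.
	 */
	static inline void atomic_add_sketch(int i, int *v)
	{
		unsigned long tmp;
		int result;

		__asm__ __volatile__(
				"1:     l32ex   %1, %3\n"
				"       add     %0, %1, %2\n"
				"       s32ex   %0, %3\n"
				"       getex   %0\n"
				"       beqz    %0, 1b\n"
				: "=&a" (result), "=&a" (tmp)
				: "a" (i), "a" (v)
				: "memory");
	}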
 
        DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
        DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
        DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+#if XCHAL_HAVE_EXCLUSIVE
+       DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
+#endif
 #if XTENSA_HAVE_COPROCESSORS
        DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
        DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
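The DEFINE() lines are the standard asm-offsets mechanism: each constant is emitted into generated assembly and extracted at build time into include/generated/asm-offsets.h, which is how the entry.S code below can reference THREAD_ATOMCTL8 symbolically. The macro comes from include/linux/kbuild.h:

	/* The "i" constraint makes the compiler print the offset as an
	 * immediate in the generated .s file, where the build scripts
	 * pick it up and turn it into a #define.
	 */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))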
 
        s32i    a2, a1, PT_LCOUNT
 #endif
 
+#if XCHAL_HAVE_EXCLUSIVE
+       /* Clear exclusive access monitor set by interrupted code */
+       clrex
+#endif
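Clearing the monitor here means an s32ex executed by the interrupted code after the handler returns cannot falsely succeed: getex then reports failure and the enclosing loop restarts. A sketch of compare-and-swap built on these primitives, following the shape of __cmpxchg_u32 in arch/xtensa/include/asm/cmpxchg.h (constraints illustrative):

	/* Sketch: cmpxchg via exclusive access.  Anything that clears the
	 * monitor between l32ex and s32ex -- including the clrex above --
	 * forces getex to return zero, restarting the sequence at 1:.
	 */
	static inline unsigned long
	cmpxchg_u32_sketch(volatile int *p, int old, int new)
	{
		unsigned long tmp, result;

		__asm__ __volatile__(
				"1:     l32ex   %0, %3\n"
				"       bne     %0, %4, 2f\n"
				"       mov     %1, %2\n"
				"       s32ex   %1, %3\n"
				"       getex   %1\n"
				"       beqz    %1, 1b\n"
				"2:\n"
				: "=&a" (result), "=&a" (tmp)
				: "a" (new), "a" (p), "a" (old)
				: "memory");

		return result;
	}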
+
        /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
 
        rsr     a2, exccause
        s32i    a3, a4, THREAD_CPENABLE
 #endif
 
+#if XCHAL_HAVE_EXCLUSIVE
+       /* swap prev's and next's saved exclusive store results */
+       l32i    a3, a5, THREAD_ATOMCTL8
+       getex   a3
+       s32i    a3, a4, THREAD_ATOMCTL8
+#endif
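A single getex both reads and writes ATOMCTL8, so it restores the incoming thread's saved store result while capturing the outgoing thread's in one step, mirroring the CPENABLE swap just above. A hypothetical C model of that exchange (getex_model and switch_atomctl8 are illustrative names, not kernel API):

	#include <stdint.h>

	/* Models the ATOMCTL8 special register. */
	static uint32_t atomctl8;

	/* Hypothetical stand-in for getex: exchange the operand with the
	 * exclusive store result held in ATOMCTL8.
	 */
	static uint32_t getex_model(uint32_t next_val)
	{
		uint32_t prev_val = atomctl8;

		atomctl8 = next_val;
		return prev_val;
	}

	/* Conceptual equivalent of the fragment above:
	 *   l32i a3, a5, THREAD_ATOMCTL8    read next's saved result
	 *   getex a3                        swap it into the register
	 *   s32i a3, a4, THREAD_ATOMCTL8    store what prev had
	 */
	static void switch_atomctl8(uint32_t *prev_slot, const uint32_t *next_slot)
	{
		*prev_slot = getex_model(*next_slot);
	}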
+
        /* Flush register file. */
 
        spill_registers_kernel