        preempt_count_dec();
 }
 
-enum ctx_state ist_enter(struct pt_regs *regs)
+void ist_enter(struct pt_regs *regs)
 {
-       enum ctx_state prev_state;
-
        if (user_mode(regs)) {
-               /* Other than that, we're just an exception. */
-               prev_state = exception_enter();
+               CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        } else {
                /*
                 * We might have interrupted pretty much anything.  In
                 * fact, if we're a machine check, we can even interrupt
                 * NMI processing.  We don't want in_nmi() to return true,
                 * but we need to notify RCU.
                 */
                rcu_nmi_enter();
-               prev_state = CONTEXT_KERNEL;  /* the value is irrelevant. */
        }
 
        /*
-        * We are atomic because we're on the IST stack (or we're on x86_32,
-        * in which case we still shouldn't schedule).
-        *
-        * This must be after exception_enter(), because exception_enter()
-        * won't do anything if in_interrupt() returns true.
+        * We are atomic because we're on the IST stack; or we're on
+        * x86_32, in which case we still shouldn't schedule; or we're
+        * on x86_64 and entered from user mode, in which case we're
+        * still atomic unless ist_begin_non_atomic is called.
         */
        preempt_count_add(HARDIRQ_OFFSET);
 
        /* This code is a bit fragile.  Test it. */
        rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");
-
-       return prev_state;
 }
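
/*
 * Illustration, not part of the patch: with the void signatures above, an
 * IST handler simply brackets its body; there is no prev_state to thread
 * through anymore.  do_example_ist_trap() is a made-up name.
 */
dotraplinkage void do_example_ist_trap(struct pt_regs *regs, long error_code)
{
        ist_enter(regs);        /* atomic from here on; RCU is watching */

        /* ... handle the exception; scheduling is not allowed here ... */

        ist_exit(regs);         /* undoes ist_enter() */
}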
 
-void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
+void ist_exit(struct pt_regs *regs)
 {
-       /* Must be before exception_exit. */
        preempt_count_sub(HARDIRQ_OFFSET);
 
-       if (user_mode(regs))
-               return exception_exit(prev_state);
-       else
+       if (!user_mode(regs))
                rcu_nmi_exit();
 }
 
  * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
  * begins a non-atomic section within an ist_enter()/ist_exit() region.
  * Callers are responsible for enabling interrupts themselves inside
- * the non-atomic section, and callers must call is_end_non_atomic()
+ * the non-atomic section, and callers must call ist_end_non_atomic()
  * before ist_exit().
  */
 void ist_begin_non_atomic(struct pt_regs *regs)
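
/*
 * Illustration, not part of the patch: the contract above as seen from a
 * caller.  The shape mirrors the recoverable machine-check style of use;
 * the handler name is made up.
 */
dotraplinkage void do_example_recoverable(struct pt_regs *regs, long error_code)
{
        ist_enter(regs);

        /* ... atomic work ... */

        if (user_mode(regs)) {
                ist_begin_non_atomic(regs);
                local_irq_enable();     /* the caller's job, per the comment above */

                /* ... code that may sleep, e.g. delivering a signal ... */

                local_irq_disable();
                ist_end_non_atomic();   /* must come before ist_exit() */
        }

        ist_exit(regs);
}
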
 static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
                          unsigned long trapnr, int signr)
 {
-       enum ctx_state prev_state = exception_enter();
        siginfo_t info;
 
+       CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
                        NOTIFY_STOP) {
                conditional_sti(regs);
                do_trap(trapnr, signr, str, regs, error_code,
                        fill_trap_info(regs, signr, trapnr, &info));
        }
-
-       exception_exit(prev_state);
 }
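
/*
 * For reference, not part of the patch: DO_ERROR(), defined below, stamps
 * out thin wrappers around do_error_trap().  For example,
 *
 *      DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
 *
 * expands to roughly:
 *
 *      dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code)
 *      {
 *              do_error_trap(regs, error_code, "divide error", X86_TRAP_DE, SIGFPE);
 *      }
 */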
 
 #define DO_ERROR(trapnr, signr, str, name)                             \
        }
 #endif
 
-       ist_enter(regs);  /* Discard prev_state because we won't return. */
+       ist_enter(regs);
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
        tsk->thread.error_code = error_code;
 
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 {
-       enum ctx_state prev_state;
        const struct bndcsr *bndcsr;
        siginfo_t *info;
 
-       prev_state = exception_enter();
+       CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        if (notify_die(DIE_TRAP, "bounds", regs, error_code,
                        X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
-               goto exit;
+               return;
        conditional_sti(regs);
 
        if (!user_mode(regs))
                die("bounds", regs, error_code);
        }
 
-exit:
-       exception_exit(prev_state);
        return;
+
 exit_trap:
        /*
         * This path out is for all the cases where we could not
         * time..
         */
        do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
-       exception_exit(prev_state);
 }
 
 dotraplinkage void
 do_general_protection(struct pt_regs *regs, long error_code)
 {
        struct task_struct *tsk;
-       enum ctx_state prev_state;
 
-       prev_state = exception_enter();
+       CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        conditional_sti(regs);
 
        if (v8086_mode(regs)) {
                local_irq_enable();
                handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-               goto exit;
+               return;
        }
 
        tsk = current;
        if (!user_mode(regs)) {
                if (fixup_exception(regs))
-                       goto exit;
+                       return;
 
                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = X86_TRAP_GP;
                if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
                               X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
                        die("general protection fault", regs, error_code);
-               goto exit;
+               return;
        }
 
        tsk->thread.error_code = error_code;
        }
 
        force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
-exit:
-       exception_exit(prev_state);
 }
 NOKPROBE_SYMBOL(do_general_protection);
 
 /* May run on IST stack. */
 dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 {
-       enum ctx_state prev_state;
-
 #ifdef CONFIG_DYNAMIC_FTRACE
        /*
         * ftrace must be first, everything else may cause a recursive crash.
         * See note by declaration of modifying_ftrace_code in ftrace.c
         */
        if (unlikely(atomic_read(&modifying_ftrace_code)) &&
            ftrace_int3_handler(regs))
                return;
 #endif /* CONFIG_DYNAMIC_FTRACE */
        if (poke_int3_handler(regs))
                return;
 
-       prev_state = ist_enter(regs);
+       ist_enter(regs);
+       CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                        SIGTRAP) == NOTIFY_STOP)
                goto exit;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();
 exit:
-       ist_exit(regs, prev_state);
+       ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_int3);
 
 dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 {
        struct task_struct *tsk = current;
-       enum ctx_state prev_state;
        int user_icebp = 0;
        unsigned long dr6;
        int si_code;
 
-       prev_state = ist_enter(regs);
+       ist_enter(regs);
 
        get_debugreg(dr6, 6);
 
        debug_stack_usage_dec();
 
 exit:
-       ist_exit(regs, prev_state);
+       ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug);
 
 
 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-       enum ctx_state prev_state;
-
-       prev_state = exception_enter();
+       CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        math_error(regs, error_code, X86_TRAP_MF);
-       exception_exit(prev_state);
 }
 
 dotraplinkage void
 do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-       enum ctx_state prev_state;
-
-       prev_state = exception_enter();
+       CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        math_error(regs, error_code, X86_TRAP_XF);
-       exception_exit(prev_state);
 }
 
 dotraplinkage void
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
-       enum ctx_state prev_state;
-
-       prev_state = exception_enter();
+       CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        BUG_ON(use_eager_fpu());
 
 
 #ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };
 
                conditional_sti(regs);
 
                info.regs = regs;
                math_emulate(&info);
-               exception_exit(prev_state);
                return;
        }
 #endif
 #ifdef CONFIG_X86_32
        conditional_sti(regs);
 #endif
-       exception_exit(prev_state);
 }
 NOKPROBE_SYMBOL(do_device_not_available);
 
 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 {
        siginfo_t info;
-       enum ctx_state prev_state;
 
-       prev_state = exception_enter();
+       CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        local_irq_enable();
 
                do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
                        &info);
        }
-       exception_exit(prev_state);
 }
 #endif