 #include <linux/sched/debug.h> /* For show_regs() */
 #include <linux/percpu-defs.h>
 #include <linux/mem_encrypt.h>
-#include <linux/lockdep.h>
 #include <linux/printk.h>
 #include <linux/mm_types.h>
 #include <linux/set_memory.h>
        this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
 }
 
-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+/*
+ * Nothing shall interrupt this code path while holding the per-CPU
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ *
+ * Callers must disable local interrupts around it.
+ */
+static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
 {
        struct sev_es_runtime_data *data;
        struct ghcb *ghcb;
 
+       WARN_ON(!irqs_disabled());
+
        data = this_cpu_read(runtime_data);
        ghcb = &data->ghcb_page;
 
                        data->ghcb_active        = false;
                        data->backup_ghcb_active = false;
 
+                       instrumentation_begin();
                        panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+                       instrumentation_end();
                }
 
                /* Mark backup_ghcb active before writing to it */
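
For reference, the caller discipline the new comment demands looks like this
(a minimal sketch, not part of the patch; vc_example_call() and the elided
GHCB usage are hypothetical):

        static void vc_example_call(void)
        {
                struct ghcb_state state;
                unsigned long flags;
                struct ghcb *ghcb;

                /* IRQs must stay off for the whole get/use/put window */
                local_irq_save(flags);

                ghcb = __sev_get_ghcb(&state);

                /* ... fill the GHCB and VMGEXIT here ... */

                __sev_put_ghcb(&state);

                local_irq_restore(flags);
        }

The get_jump_table_addr() hunk below follows exactly this pattern. Callers
that already run in NMI context, such as sev_es_nmi_complete(), may skip the
local_irq_save()/restore() pair, since NMIs execute with IRQs disabled and
the WARN_ON(!irqs_disabled()) check holds either way.
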
 /* Include code shared with pre-decompression boot stage */
 #include "sev-shared.c"
 
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+static noinstr void __sev_put_ghcb(struct ghcb_state *state)
 {
        struct sev_es_runtime_data *data;
        struct ghcb *ghcb;
 
+       WARN_ON(!irqs_disabled());
+
        data = this_cpu_read(runtime_data);
        ghcb = &data->ghcb_page;
 
        struct ghcb_state state;
        struct ghcb *ghcb;
 
-       ghcb = sev_es_get_ghcb(&state);
+       ghcb = __sev_get_ghcb(&state);
 
        vc_ghcb_invalidate(ghcb);
        ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
        sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
        VMGEXIT();
 
-       sev_es_put_ghcb(&state);
+       __sev_put_ghcb(&state);
 }
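
The body of sev_es_nmi_complete() above is the basic GHCB call sequence:
invalidate the shared page, set the exit code, publish the GHCB's physical
address via the GHCB MSR, then exit to the hypervisor. As an annotated
sketch (not part of the patch; the ghcb_set_sw_exit_info_*() setters are
assumed from the GHCB protocol and carry no payload for NMI_COMPLETE):

        vc_ghcb_invalidate(ghcb);               /* clear stale valid bitmap */
        ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
        ghcb_set_sw_exit_info_1(ghcb, 0);       /* no arguments for this call */
        ghcb_set_sw_exit_info_2(ghcb, 0);
        sev_es_wr_ghcb_msr(__pa_nodebug(ghcb)); /* publish GHCB GPA to the HV */
        VMGEXIT();                              /* world switch to hypervisor */
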
 
 static u64 get_jump_table_addr(void)
 
        local_irq_save(flags);
 
-       ghcb = sev_es_get_ghcb(&state);
+       ghcb = __sev_get_ghcb(&state);
 
        vc_ghcb_invalidate(ghcb);
        ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
            ghcb_sw_exit_info_2_is_valid(ghcb))
                ret = ghcb->save.sw_exit_info_2;
 
-       sev_es_put_ghcb(&state);
+       __sev_put_ghcb(&state);
 
        local_irq_restore(flags);
 
        struct ghcb_state state;
        struct ghcb *ghcb;
 
-       ghcb = sev_es_get_ghcb(&state);
+       ghcb = __sev_get_ghcb(&state);
 
        while (true) {
                vc_ghcb_invalidate(ghcb);
                        break;
        }
 
-       sev_es_put_ghcb(&state);
+       __sev_put_ghcb(&state);
 }
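
Both helpers are now noinstr, so any call from inside them into code that may
be instrumented has to be bracketed explicitly; that is why the panic() in
__sev_get_ghcb() gained an instrumentation_begin()/end() pair. The general
convention is (a generic sketch, not from this patch; fatal_condition is a
placeholder):

        static noinstr void example_noinstr_func(bool fatal_condition)
        {
                /* only non-instrumentable code is allowed here ... */

                if (fatal_condition) {
                        /*
                         * panic() lives in instrumentable code; mark the
                         * transition so objtool's noinstr validation
                         * accepts the call.
                         */
                        instrumentation_begin();
                        panic("example fatal condition");
                        instrumentation_end();
                }
        }
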
 
 /*
        }
 
        irq_state = irqentry_nmi_enter(regs);
-       lockdep_assert_irqs_disabled();
        instrumentation_begin();
 
        /*
         * keep the IRQs disabled to protect us against concurrent TLB flushes.
         */
 
-       ghcb = sev_es_get_ghcb(&state);
+       ghcb = __sev_get_ghcb(&state);
 
        vc_ghcb_invalidate(ghcb);
        result = vc_init_em_ctxt(&ctxt, regs, error_code);
        if (result == ES_OK)
                result = vc_handle_exitcode(&ctxt, ghcb, error_code);
 
-       sev_es_put_ghcb(&state);
+       __sev_put_ghcb(&state);
 
        /* Done - now check the result */
        switch (result) {