/*
 * Run instruction @x with a fault fixup that zeroes @reg — presumably so a
 * read that faults during reboot yields 0; see
 * ____kvm_handle_fault_on_reboot() for the exact semantics (TODO confirm).
 */
#define __ex_clear(x, reg) \
        ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)
 
+/*
+ * Out-of-line error reporters (defined further down in this file).
+ * Forward-declared here because the vmx_asm{1,2}() macros token-paste
+ * "<insn>_error" call sites into the inline wrappers that precede the
+ * reporters' definitions.
+ */
+void vmwrite_error(unsigned long field, unsigned long value);
+void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
+void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
+void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
+void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);
+
 static __always_inline void vmcs_check16(unsigned long field)
 {
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
        return __vmcs_readl(field);
 }
 
-static noinline void vmwrite_error(unsigned long field, unsigned long value)
-{
-       printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
-              field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
-       dump_stack();
-}
+/*
+ * Emit a single-operand VMX instruction using asm goto for error handling.
+ * "jna" branches to the error label when CF=1 or ZF=1, i.e. on
+ * VMfailInvalid/VMfailValid, and the error label calls the token-pasted
+ * <insn>_error() reporter with @error_args.  A fault on the instruction
+ * itself is routed via the exception table entry to kvm_spurious_fault().
+ *
+ * Note: every path ends in "return", so this macro must be the tail of a
+ * void function.
+ */
+#define vmx_asm1(insn, op1, error_args...)                             \
+do {                                                                   \
+       asm_volatile_goto("1: " __stringify(insn) " %0\n\t"             \
+                         ".byte 0x2e\n\t" /* branch not taken hint */  \
+                         "jna %l[error]\n\t"                           \
+                         _ASM_EXTABLE(1b, %l[fault])                   \
+                         : : op1 : "cc" : error, fault);               \
+       return;                                                         \
+error:                                                                 \
+       insn##_error(error_args);                                       \
+       return;                                                         \
+fault:                                                                 \
+       kvm_spurious_fault();                                           \
+} while (0)
+
+/*
+ * Two-operand variant of vmx_asm1(): emits "<insn> op2, op1" (AT&T order,
+ * %1 then %0) with the same asm goto error handling — "jna" (CF=1 or ZF=1,
+ * i.e. VMfailInvalid/VMfailValid) branches to the <insn>_error() reporter,
+ * and a fault on the instruction goes to kvm_spurious_fault() via the
+ * exception table.  Ends in "return" on every path, so it must be the tail
+ * of a void function.
+ */
+#define vmx_asm2(insn, op1, op2, error_args...)                                \
+do {                                                                   \
+       asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"        \
+                         ".byte 0x2e\n\t" /* branch not taken hint */  \
+                         "jna %l[error]\n\t"                           \
+                         _ASM_EXTABLE(1b, %l[fault])                   \
+                         : : op1, op2 : "cc" : error, fault);          \
+       return;                                                         \
+error:                                                                 \
+       insn##_error(error_args);                                       \
+       return;                                                         \
+fault:                                                                 \
+       kvm_spurious_fault();                                           \
+} while (0)
 
 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
 {
-       bool error;
-
-       asm volatile (__ex("vmwrite %2, %1") CC_SET(na)
-                     : CC_OUT(na) (error) : "r"(field), "rm"(value));
-       if (unlikely(error))
-               vmwrite_error(field, value);
+       /* VMWRITE @value into @field; on failure vmwrite_error(field, value)
+        * is invoked by the macro's error path. */
+       vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
 }
 
 static __always_inline void vmcs_write16(unsigned long field, u16 value)
 static inline void vmcs_clear(struct vmcs *vmcs)
 {
        u64 phys_addr = __pa(vmcs);
-       bool error;
 
-       asm volatile (__ex("vmclear %1") CC_SET(na)
-                     : CC_OUT(na) (error) : "m"(phys_addr));
-       if (unlikely(error))
-               printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
-                      vmcs, phys_addr);
+       /* VMCLEAR the VMCS at @phys_addr; failure is reported via
+        * vmclear_error(vmcs, phys_addr) from the macro's error path. */
+       vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
 }
 
 static inline void vmcs_load(struct vmcs *vmcs)
 {
        u64 phys_addr = __pa(vmcs);
-       bool error;
 
+       /* Enlightened VMCS (Hyper-V) takes a different load path entirely. */
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_load(phys_addr);
 
-       asm volatile (__ex("vmptrld %1") CC_SET(na)
-                     : CC_OUT(na) (error) : "m"(phys_addr));
-       if (unlikely(error))
-               printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
-                      vmcs, phys_addr);
+       /* VMPTRLD the VMCS at @phys_addr; failure is reported via
+        * vmptrld_error(vmcs, phys_addr) from the macro's error path. */
+       vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
 }
 
 static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
                u64 rsvd : 48;
                u64 gva;
        } operand = { vpid, 0, gva };
-       bool error;
 
-       asm volatile (__ex("invvpid %2, %1") CC_SET(na)
-                     : CC_OUT(na) (error) : "r"(ext), "m"(operand));
-       BUG_ON(error);
+       vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
 }
 
 static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
        struct {
                u64 eptp, gpa;
        } operand = {eptp, gpa};
-       bool error;
 
-       asm volatile (__ex("invept %2, %1") CC_SET(na)
-                     : CC_OUT(na) (error) : "r"(ext), "m"(operand));
-       BUG_ON(error);
+       vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
 }
 
 static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
 
 
 void vmx_vmexit(void);
 
+/*
+ * Report a failed VMX instruction: WARN_ONCE() gives a one-time backtrace,
+ * while pr_warn_ratelimited() keeps subsequent failures visible in the log
+ * without spamming it.
+ */
+#define vmx_insn_failed(fmt...)                \
+do {                                   \
+       WARN_ONCE(1, fmt);              \
+       pr_warn_ratelimited(fmt);       \
+} while (0)
+
+/*
+ * Out-of-line reporter for a failed VMWRITE: logs the target field, the
+ * value, and the VM-instruction error code read from the current VMCS.
+ */
+noinline void vmwrite_error(unsigned long field, unsigned long value)
+{
+       vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",
+                       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+}
+
+/* Out-of-line reporter for a failed VMCLEAR: logs the VMCS pointer and its
+ * physical address. */
+noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
+{
+       vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr);
+}
+
+/* Out-of-line reporter for a failed VMPTRLD: logs the VMCS pointer and its
+ * physical address. */
+noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
+{
+       vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr);
+}
+
+/* Out-of-line reporter for a failed INVVPID: logs the invalidation type
+ * (@ext), the VPID and the guest virtual address. */
+noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
+{
+       vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
+                       ext, vpid, gva);
+}
+
+/* Out-of-line reporter for a failed INVEPT: logs the invalidation type
+ * (@ext), the EPT pointer and the guest physical address. */
+noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
+{
+       vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
+                       ext, eptp, gpa);
+}
+
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 /*