{
        unsigned long field;
        u64 field_value;
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
        int len;
        gva_t gva = 0;
-       struct vmcs12 *vmcs12;
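+       /*
+        * A VMREAD intercepted from L2 (guest mode) is emulated against the
+        * shadow vmcs12, i.e. KVM's cache of the VMCS referenced by vmcs12's
+        * VMCS-link pointer; a VMREAD from L1 reads the current vmcs12.
+        */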
+       struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+                                                   : get_vmcs12(vcpu);
        struct x86_exception e;
        short offset;
 
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+       /*
+        * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+        * any VMREAD sets the ALU flags for VMfailInvalid.
+        */
+       if (vmx->nested.current_vmptr == -1ull ||
+           (is_guest_mode(vcpu) &&
+            get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
                return nested_vmx_failInvalid(vcpu);
 
-       if (!is_guest_mode(vcpu))
-               vmcs12 = get_vmcs12(vcpu);
-       else {
-               /*
-                * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
-                * to shadowed-field sets the ALU flags for VMfailInvalid.
-                */
-               if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-                       return nested_vmx_failInvalid(vcpu);
-               vmcs12 = get_shadow_vmcs12(vcpu);
-       }
-
        /* Decode instruction info and find the field to read */
        field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
 
@@ ... @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
         */
        u64 field_value = 0;
        struct x86_exception e;
-       struct vmcs12 *vmcs12;
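+       /*
+        * A VMWRITE intercepted from L2 (guest mode) targets the shadow
+        * vmcs12; a VMWRITE from L1 targets the current vmcs12.
+        */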
+       struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+                                                   : get_vmcs12(vcpu);
        short offset;
 
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (vmx->nested.current_vmptr == -1ull)
+       /*
+        * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+        * any VMWRITE sets the ALU flags for VMfailInvalid.
+        */
+       if (vmx->nested.current_vmptr == -1ull ||
+           (is_guest_mode(vcpu) &&
+            get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
                return nested_vmx_failInvalid(vcpu);
 
        if (vmx_instruction_info & (1u << 10))
@@ ... @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                return nested_vmx_failValid(vcpu,
                        VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
 
-       if (!is_guest_mode(vcpu)) {
-               vmcs12 = get_vmcs12(vcpu);
-
-               /*
-                * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
-                * vmcs12, else we may crush a field or consume a stale value.
-                */
-               if (!is_shadow_field_rw(field))
-                       copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
-       } else {
-               /*
-                * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
-                * to shadowed-field sets the ALU flags for VMfailInvalid.
-                */
-               if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-                       return nested_vmx_failInvalid(vcpu);
-               vmcs12 = get_shadow_vmcs12(vcpu);
-       }
+       /*
+        * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
+        * vmcs12, else we may crush a field or consume a stale value.
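+        * No sync is needed in guest mode, as the VMWRITE then lands in the
+        * shadow vmcs12 instead of L1's vmcs12.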
+        */
+       if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
+               copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
        offset = vmcs_field_to_offset(field);
        if (offset < 0)