return 1;
 }
 
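+/* Unmap and unpin the page backing the currently loaded vmcs12. */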
+static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
+{
+       kunmap(vmx->nested.current_vmcs12_page);
+       nested_release_page(vmx->nested.current_vmcs12_page);
+}
+
 /*
  * Free whatever needs to be freed from vmx->nested when L1 goes down, or
  * just stops using VMX.
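As an aside (commentary, not part of the patch): the two calls the new helper wraps undo the kmap() and the page pin taken when the current vmcs12 was loaded. From memory, nested_release_page() in this era's vmx.c is a thin wrapper along the lines of the sketch below; treat the exact body as an assumption rather than a quote.

/*
 * Paraphrased sketch (assumption, not a verbatim quote): the guest may have
 * written the vmcs12 through the kmap()'d mapping, so release the page dirty.
 */
static void nested_release_page(struct page *page)
{
	kvm_release_page_dirty(page);
}

Pulling the kunmap()/release pair into one helper keeps every caller from having to repeat it, in the right order, by hand.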
                return;
        vmx->nested.vmxon = false;
        if (vmx->nested.current_vmptr != -1ull) {
-               kunmap(vmx->nested.current_vmcs12_page);
-               nested_release_page(vmx->nested.current_vmcs12_page);
+               nested_release_vmcs12(vmx);
                vmx->nested.current_vmptr = -1ull;
                vmx->nested.current_vmcs12 = NULL;
        }
+       if (enable_shadow_vmcs)
+               free_vmcs(vmx->nested.current_shadow_vmcs);
        /* Unpin physical memory we referred to in current vmcs02 */
        if (vmx->nested.apic_access_page) {
                nested_release_page(vmx->nested.apic_access_page);
        }
 
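The new free_vmcs() call pairs with the shadow-VMCS allocation done on VMXON earlier in this series, so free_nested() is the natural place to undo it. From memory, the allocation side in the VMXON handler looks roughly like the sketch below (assumed shape, simplified; the exact placement and error handling are assumptions).

/*
 * Rough sketch of the allocation counterpart in handle_vmon()
 * (assumed shape, not a verbatim quote from the series).
 */
if (enable_shadow_vmcs) {
	struct vmcs *shadow_vmcs = alloc_vmcs();

	if (!shadow_vmcs)
		return -ENOMEM;
	/* Bit 31 of the revision id marks a VMCS as a shadow VMCS. */
	shadow_vmcs->revision_id |= (1u << 31);
	vmcs_clear(shadow_vmcs);
	vmx->nested.current_shadow_vmcs = shadow_vmcs;
}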
        if (vmptr == vmx->nested.current_vmptr) {
-               kunmap(vmx->nested.current_vmcs12_page);
-               nested_release_page(vmx->nested.current_vmcs12_page);
+               nested_release_vmcs12(vmx);
                vmx->nested.current_vmptr = -1ull;
                vmx->nested.current_vmcs12 = NULL;
        }
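Both free_nested() and the VMCLEAR hunk just above reset current_vmptr and current_vmcs12 right after the release. A purely hypothetical follow-up, not part of this patch, would be to fold that reset into the helper so callers cannot forget it; the VMPTRLD path overwrites those fields immediately anyway, so it would tolerate the extra stores.

/* Hypothetical variant (not in this patch): also reset the cached state. */
static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
{
	kunmap(vmx->nested.current_vmcs12_page);
	nested_release_page(vmx->nested.current_vmcs12_page);
	vmx->nested.current_vmptr = -1ull;
	vmx->nested.current_vmcs12 = NULL;
}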
                        skip_emulated_instruction(vcpu);
                        return 1;
                }
-               if (vmx->nested.current_vmptr != -1ull) {
-                       kunmap(vmx->nested.current_vmcs12_page);
-                       nested_release_page(vmx->nested.current_vmcs12_page);
-               }
+               if (vmx->nested.current_vmptr != -1ull)
+                       nested_release_vmcs12(vmx);
 
                vmx->nested.current_vmptr = vmptr;
                vmx->nested.current_vmcs12 = new_vmcs12;
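To put that last hunk in context: the page released by nested_release_vmcs12() here was pinned and mapped a few lines earlier in the same VMPTRLD path, roughly as in the simplified sketch below (error paths omitted; the surrounding shape is an assumption based on the hunk and my reading of the code of this era).

/*
 * Simplified sketch of the VMPTRLD tail after this patch
 * (assumed shape, error handling omitted).
 */
struct page *page = nested_get_page(vcpu, vmptr);
struct vmcs12 *new_vmcs12 = kmap(page);

/* Drop the mapping and pin of the previously loaded vmcs12, if any. */
if (vmx->nested.current_vmptr != -1ull)
	nested_release_vmcs12(vmx);

vmx->nested.current_vmptr = vmptr;
vmx->nested.current_vmcs12 = new_vmcs12;
vmx->nested.current_vmcs12_page = page;

Calling the helper before current_vmcs12_page is overwritten is what keeps the old mapping and page reference from leaking.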