static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
 {
+       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
        if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
                kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
                vmx->nested.hv_evmcs = NULL;
        }
 
        vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
+
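+       /*
+        * Invalidate the Hyper-V nested state (partition assist page and
+        * VM/VP IDs) that was cached from the eVMCS; hv_vcpu is NULL when
+        * Hyper-V emulation has not been enabled for this vCPU.
+        */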
+       if (hv_vcpu) {
+               hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
+               hv_vcpu->nested.vm_id = 0;
+               hv_vcpu->nested.vp_id = 0;
+       }
 }
 
 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
 {
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
        struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);
 
        /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
        vmcs12->tpr_threshold = evmcs->tpr_threshold;
        vmcs12->guest_rip = evmcs->guest_rip;
 
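+       /*
+        * Cache the partition assist page and VM/VP IDs advertised through
+        * the eVMCS; hv_vcpu is assumed to be valid here, since an eVMCS
+        * can only be in use once the guest has enabled Hyper-V
+        * enlightenments.
+        */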
+       if (unlikely(!(hv_clean_fields &
+                      HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL))) {
+               hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page;
+               hv_vcpu->nested.vm_id = evmcs->hv_vm_id;
+               hv_vcpu->nested.vp_id = evmcs->hv_vp_id;
+       }
+
        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
                vmcs12->guest_rsp = evmcs->guest_rsp;