struct page *pml_pg;
 
        u64 current_tsc_ratio;
+
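+       /*
+        * PKRU bookkeeping: guest_pkru_valid is set when the guest's
+        * PKRU differs from the host's and must be written back before
+        * VM entry.
+        */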
+       bool guest_pkru_valid;
+       u32 guest_pkru;
+       u32 host_pkru;
 };
 
 enum segment_cache_field {
        } while (cmpxchg(&pi_desc->control, old.control,
                        new.control) != old.control);
 }
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
        }
 
        vmx_vcpu_pi_load(vcpu, cpu);
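+       /* Snapshot the host's PKRU; it is restored after every VM exit. */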
+       vmx->host_pkru = read_pkru();
 }
 
 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
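+       /*
+        * PKRU is not saved/restored by VM entry and exit; if the guest's
+        * value differs from the host's, load it by hand before entering
+        * the guest.
+        */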
+       if (vmx->guest_pkru_valid)
+               __write_pkru(vmx->guest_pkru);
+
        atomic_switch_perf_msrs(vmx);
        debugctlmsr = get_debugctlmsr();
 
 
        vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
 
+       /*
+        * Eager FPU is enabled whenever PKEYs are supported, and the
+        * host's CR4 is back in effect after VM exit, so it is safe to
+        * read the guest's PKRU from the current XSAVE state.
+        */
+       if (boot_cpu_has(X86_FEATURE_OSPKE)) {
+               vmx->guest_pkru = __read_pkru();
+               if (vmx->guest_pkru != vmx->host_pkru) {
+                       vmx->guest_pkru_valid = true;
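+                       /*
+                        * Accesses to user pages honor PKRU even from the
+                        * kernel, so switch back to the host's value before
+                        * KVM touches guest memory through the user mapping.
+                        */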
+                       __write_pkru(vmx->host_pkru);
+               } else {
+                       vmx->guest_pkru_valid = false;
+               }
+       }
+
        /*
         * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
         * we did not inject a still-pending event to L1 now because of