Pick up bugfixes from 5.9; otherwise various tests fail.
         * would also use advanced VM-exit information for EPT violations to
         * reconstruct the page fault error code.
         */
-       if (unlikely(kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
 -      if (unlikely(allow_smaller_maxphyaddr && kvm_mmu_is_illegal_gpa(vcpu, gpa)))
++      if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
                return kvm_emulate_instruction(vcpu, 0);
  
        return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
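
For reference, the helper kept by the resolution above boils down to a range check against the guest's advertised MAXPHYADDR. A minimal sketch, assuming the 5.9-era definition from arch/x86/kvm/cpuid.h (the in-tree version may differ in detail):

/*
 * Sketch of the helper used in the resolved hunk above; the real
 * definition lives in arch/x86/kvm/cpuid.h and may differ in detail.
 */
static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        /* A GPA is illegal if it uses bits at or above the guest's MAXPHYADDR. */
        return gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu));
}

The allow_smaller_maxphyaddr check added by the resolution keeps KVM off this emulation path unless support for a guest MAXPHYADDR smaller than the host's is actually in use, which is the behavioral fix being picked up from 5.9.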
 
  
  static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
  {
-       return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+       if (!enable_ept)
+               return true;
+ 
+       return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
  }
  
 +static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
 +{
 +      return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
 +          (secondary_exec_controls_get(to_vmx(vcpu)) &
 +          SECONDARY_EXEC_UNRESTRICTED_GUEST));
 +}
 +
 +bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
 +static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
 +{
 +      return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
 +}
 +
  void dump_vmcs(void);
  
  #endif /* __KVM_X86_VMX_H */
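
The hunk above (the header guarded by __KVM_X86_VMX_H, i.e. arch/x86/kvm/vmx/vmx.h) reworks vmx_need_pf_intercept() and adds the is_unrestricted_guest()/vmx_guest_state_valid() helpers. To illustrate how the first helper is consumed, here is a hypothetical, simplified stand-in for vmx.c's exception-bitmap update (the real helper in vmx.c manages many more vectors; example_update_pf_intercept() is not a real kernel function):

/*
 * Hypothetical, simplified stand-in for the exception-bitmap update in
 * vmx.c; example_update_pf_intercept() is illustration only.
 */
static void example_update_pf_intercept(struct kvm_vcpu *vcpu, u32 *eb)
{
        /*
         * Intercept #PF only when needed: EPT is disabled, or the guest's
         * MAXPHYADDR is smaller than the host's and allow_smaller_maxphyaddr
         * is set, i.e. the conditions encoded in vmx_need_pf_intercept().
         */
        if (vmx_need_pf_intercept(vcpu))
                *eb |= 1u << PF_VECTOR;
        else
                *eb &= ~(1u << PF_VECTOR);
}

vmx_guest_state_valid() is consulted in the same spirit on the emulation path, roughly as emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu), so an unrestricted guest is never flagged as requiring invalid-guest-state emulation.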
 
                 * even when not intercepted. AMD manual doesn't explicitly
                 * state this but appears to behave the same.
                 *
-                * Unconditionally return L1's TSC offset on userspace reads
-                * so that userspace reads and writes always operate on L1's
-                * offset, e.g. to ensure deterministic behavior for migration.
+                * On userspace reads and writes, however, we unconditionally
 -               * operate L1's TSC value to ensure backwards-compatible
++               * return L1's TSC value to ensure backwards-compatible
+                * behavior for migration.
                 */
                u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
                                                            vcpu->arch.tsc_offset;
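
The offset selected above is then added to the scaled host TSC to produce the value handed back to the caller, presumably in the MSR_IA32_TSC read path of kvm_get_msr_common() in arch/x86/kvm/x86.c. A minimal sketch, wrapped in a hypothetical helper and assuming the 5.9-era kvm_scale_tsc(vcpu, tsc) signature:

/*
 * Hypothetical wrapper mirroring the MSR_IA32_TSC read shown above;
 * example_read_guest_tsc() is not a real kernel function, and the
 * kvm_scale_tsc() signature is assumed to match the 5.9-era code.
 */
static u64 example_read_guest_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        /*
         * Host-initiated (userspace) accesses always see L1's offset so
         * that save/restore across migration is consistent; reads from
         * within the guest see the currently active, possibly L2, offset.
         */
        u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
                                                    vcpu->arch.tsc_offset;

        /* Scale the host TSC to the guest's rate, then apply the offset. */
        return kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
}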