On entering/exiting TDX vcpu, preserved or clobbered CPU state is different
from the VMX case. Add TDX hooks to save/restore host/guest CPU state.
Save/restore kernel GS base MSR.
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20250129095902.16391-7-adrian.hunter@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
        vmx_update_cpu_dirty_logging(vcpu);
 }
 
+/*
+ * Dispatch prepare_switch_to_guest to the TDX path for TD vCPUs,
+ * otherwise fall back to the regular VMX path.
+ */
+static void vt_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu)) {
+               tdx_prepare_switch_to_guest(vcpu);
+               return;
+       }
+
+       vmx_prepare_switch_to_guest(vcpu);
+}
+
+/*
+ * Dispatch vcpu_put to the TDX path for TD vCPUs, otherwise fall back
+ * to the regular VMX path.
+ */
+static void vt_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu)) {
+               tdx_vcpu_put(vcpu);
+               return;
+       }
+
+       vmx_vcpu_put(vcpu);
+}
+
 static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
 {
        if (is_td_vcpu(vcpu))
        .vcpu_free = vt_vcpu_free,
        .vcpu_reset = vt_vcpu_reset,
 
-       .prepare_switch_to_guest = vmx_prepare_switch_to_guest,
+       .prepare_switch_to_guest = vt_prepare_switch_to_guest,
        .vcpu_load = vt_vcpu_load,
-       .vcpu_put = vmx_vcpu_put,
+       .vcpu_put = vt_vcpu_put,
 
        .update_exception_bitmap = vmx_update_exception_bitmap,
        .get_feature_msr = vmx_get_feature_msr,
 
 #include <linux/cpu.h>
 #include <asm/cpufeature.h>
 #include <linux/misc_cgroup.h>
+#include <linux/mmu_context.h>
 #include <asm/tdx.h>
 #include "capabilities.h"
 #include "mmu.h"
 #include "vmx.h"
 #include "mmu/spte.h"
 #include "common.h"
+#include "posted_intr.h"
 #include <trace/events/kvm.h>
 #include "trace.h"
 
        local_irq_enable();
 }
 
+/*
+ * Compared to vmx_prepare_switch_to_guest(), there is not much to do
+ * as SEAMCALL/SEAMRET calls take care of most of save and restore.
+ */
+void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vt *vt = to_vt(vcpu);
+
+       /* Host state already saved for this vCPU; nothing to do. */
+       if (vt->guest_state_loaded)
+               return;
+
+       /*
+        * Save the host's KERNEL_GS_BASE so tdx_prepare_switch_to_host()
+        * can restore it: use the cached thread.gsbase for a 64-bit mm,
+        * otherwise read the MSR directly.
+        */
+       if (likely(is_64bit_mm(current->mm)))
+               vt->msr_host_kernel_gs_base = current->thread.gsbase;
+       else
+               vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+
+       vt->guest_state_loaded = true;
+}
+
+/*
+ * Undo tdx_prepare_switch_to_guest(): restore the host KERNEL_GS_BASE
+ * saved there and mark guest state as no longer loaded.
+ */
+static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vt *vt = to_vt(vcpu);
+
+       /* Nothing to restore if guest state was never loaded. */
+       if (!vt->guest_state_loaded)
+               return;
+
+       ++vcpu->stat.host_state_reload;
+       wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
+
+       vt->guest_state_loaded = false;
+}
+
+/* Put the posted-interrupt state, then restore host MSR state. */
+void tdx_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       vmx_vcpu_pi_put(vcpu);
+       tdx_prepare_switch_to_host(vcpu);
+}
+
 void tdx_vcpu_free(struct kvm_vcpu *vcpu)
 {
        struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
 
 void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
 fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
+void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
+void tdx_vcpu_put(struct kvm_vcpu *vcpu);
 
 int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
 
 {
        return EXIT_FASTPATH_NONE;
 }
+/*
+ * No-op stubs, presumably for builds with TDX support compiled out —
+ * confirm against the enclosing #ifdef (not visible in this hunk).
+ */
+static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
+static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
 
 static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }