--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V for SVM.
+ */
+
+#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
+
+#include <asm/mshyperv.h>
+
+#include "svm.h"
+#include "svm_ops.h"
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+#include "svm_onhyperv.h"
+
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
+{
+       struct hv_enlightenments *hve;
+       struct hv_partition_assist_pg **p_hv_pa_pg =
+                       &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
+
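+       /*
+        * The partition assist page is allocated on first use and is shared
+        * by all vCPUs of the VM; it lives in the per-VM Hyper-V state.
+        */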
+       if (!*p_hv_pa_pg)
+               *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+       if (!*p_hv_pa_pg)
+               return -ENOMEM;
+
+       hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw;
+
+       hve->partition_assist_page = __pa(*p_hv_pa_pg);
+       hve->hv_vm_id = (unsigned long)vcpu->kvm;
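+       /*
+        * Enable the nested TLB flush hypercall enlightenment and mark the
+        * software-reserved clean bit dirty so the L0 hypervisor picks up
+        * the change.
+        */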
+       if (!hve->hv_enlightenments_control.nested_flush_hypercall) {
+               hve->hv_enlightenments_control.nested_flush_hypercall = 1;
+               vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+       }
+
+       return 0;
+}
  */
 #define VMCB_HV_NESTED_ENLIGHTENMENTS VMCB_SW
 
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu);
+
 static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
 {
        struct hv_enlightenments *hve =
                svm_x86_ops.tlb_remote_flush_with_range =
                                hv_remote_flush_tlb_with_range;
        }
+
+       if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
+               int cpu;
+
+               pr_info("kvm: Hyper-V Direct TLB Flush enabled\n");
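+               /*
+                * Opt every online CPU in, via its VP assist page, so the L0
+                * hypervisor can handle TLB flush hypercalls from the nested
+                * guest directly.
+                */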
+               for_each_online_cpu(cpu) {
+                       struct hv_vp_assist_page *vp_ap =
+                               hv_get_vp_assist_page(cpu);
+
+                       if (!vp_ap)
+                               continue;
+
+                       vp_ap->nested_control.features.directhypercall = 1;
+               }
+               svm_x86_ops.enable_direct_tlbflush =
+                               svm_hv_enable_direct_tlbflush;
+       }
 }
 
 static inline void svm_hv_vmcb_dirty_nested_enlightenments(
            hve->hv_enlightenments_control.msr_bitmap)
                vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
 }
+
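+/*
+ * Keep the Hyper-V VP index for this vCPU current in the VMCB enlightenments
+ * area so the L0 hypervisor can target the right virtual processor; the clean
+ * bit is only dirtied when the value actually changes.
+ */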
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+               struct kvm_vcpu *vcpu)
+{
+       struct hv_enlightenments *hve =
+               (struct hv_enlightenments *)vmcb->control.reserved_sw;
+       u32 vp_index = kvm_hv_get_vpindex(vcpu);
+
+       if (hve->hv_vp_id != vp_index) {
+               hve->hv_vp_id = vp_index;
+               vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+       }
+}
 #else
 
 static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
                struct kvm_vcpu *vcpu)
 {
 }
+
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+               struct kvm_vcpu *vcpu)
+{
+}
 #endif /* CONFIG_HYPERV */
 
 #endif /* __ARCH_X86_KVM_SVM_ONHYPERV_H__ */
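
Note (not part of the diff above): the svm.c and common-x86 hunks that consume
these helpers are not included in this excerpt. As a rough, illustrative sketch
(the function name below is made up; only svm_hv_update_vp_id() and the
vcpu_svm layout from svm.h are assumed), the vCPU run path is expected to
refresh the VP index just before entering the guest:

	/* Illustrative only -- not part of this patch. */
	static void example_pre_guest_entry(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* Keep hv_vp_id current so L0 can target direct TLB flushes. */
		svm_hv_update_vp_id(svm->vmcb, vcpu);
	}

The enable_direct_tlbflush hook installed by svm_hv_hardware_setup() is
expected to be invoked per vCPU by common x86 code at vCPU creation time,
which in turn triggers the lazy partition assist page allocation in
svm_hv_enable_direct_tlbflush().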