 #define KVM_REQ_TRIPLE_FAULT           KVM_ARCH_REQ(2)
 #define KVM_REQ_MMU_SYNC               KVM_ARCH_REQ(3)
 #define KVM_REQ_CLOCK_UPDATE           KVM_ARCH_REQ(4)
-#define KVM_REQ_LOAD_CR3               KVM_ARCH_REQ(5)
+#define KVM_REQ_LOAD_MMU_PGD           KVM_ARCH_REQ(5)
 #define KVM_REQ_EVENT                  KVM_ARCH_REQ(6)
 #define KVM_REQ_APF_HALT               KVM_ARCH_REQ(7)
 #define KVM_REQ_STEAL_UPDATE           KVM_ARCH_REQ(8)
        void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
-       void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        int (*get_tdp_level)(struct kvm_vcpu *vcpu);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 
+       void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long cr3);
+
        bool (*has_wbinvd_exit)(void);
 
        u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
 
-static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
        if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
-               kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
-                                    kvm_get_active_pcid(vcpu));
+               kvm_x86_ops->load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
+                                               kvm_get_active_pcid(vcpu));
 }
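
For context, the value handed to the new hook is simply the page-aligned root HPA with the active PCID folded into the low bits, as kvm_mmu_load_pgd() above builds it. A minimal standalone sketch of that composition (not kernel code; the mask and sample values are illustrative assumptions):

/*
 * Standalone sketch, not kernel code: models how kvm_mmu_load_pgd()
 * combines the page-aligned root HPA with the active PCID before
 * passing it to ->load_mmu_pgd().  The mask and sample values are
 * illustrative assumptions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PCID_MASK	0xfffull	/* PCID occupies CR3 bits 11:0 */

static uint64_t build_pgd_value(uint64_t root_hpa, uint64_t active_pcid)
{
	assert((root_hpa & PCID_MASK) == 0);	/* root is page aligned */
	return root_hpa | (active_pcid & PCID_MASK);
}

int main(void)
{
	/* e.g. root at 0x123456000 with PCID 5 -> 0x123456005 */
	printf("0x%llx\n",
	       (unsigned long long)build_pgd_value(0x123456000ull, 5));
	return 0;
}
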
 
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
                         * accompanied by KVM_REQ_MMU_RELOAD, which will free
                         * the root set here and allocate a new one.
                         */
-                       kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
+                       kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
                        if (!skip_tlb_flush) {
                                kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
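
The hunk above only renames the request; the surrounding flow is unchanged: a root switch raises KVM_REQ_LOAD_MMU_PGD and, unless the flush may be skipped, also KVM_REQ_MMU_SYNC and KVM_REQ_TLB_FLUSH. A standalone sketch of that flow, with requests modeled as bits in a single word, loosely mirroring vcpu->requests (the slot numbers are illustrative, not the kernel's):

/*
 * Standalone sketch, not kernel code: on a root switch the MMU raises
 * KVM_REQ_LOAD_MMU_PGD and, unless the flush may be skipped, also
 * KVM_REQ_MMU_SYNC and KVM_REQ_TLB_FLUSH.  Requests are modeled as
 * bits in one word; the slot numbers are illustrative.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define REQ_MMU_SYNC		3
#define REQ_LOAD_MMU_PGD	5
#define REQ_TLB_FLUSH		6

struct toy_vcpu {
	uint64_t requests;
};

static void make_request(struct toy_vcpu *vcpu, int req)
{
	vcpu->requests |= 1ull << req;		/* like kvm_make_request() */
}

static void on_root_switch(struct toy_vcpu *vcpu, bool skip_tlb_flush)
{
	make_request(vcpu, REQ_LOAD_MMU_PGD);
	if (!skip_tlb_flush) {
		make_request(vcpu, REQ_MMU_SYNC);
		make_request(vcpu, REQ_TLB_FLUSH);
	}
}

int main(void)
{
	struct toy_vcpu vcpu = { 0 };

	on_root_switch(&vcpu, false);
	assert(vcpu.requests & (1ull << REQ_LOAD_MMU_PGD));
	assert(vcpu.requests & (1ull << REQ_TLB_FLUSH));
	return 0;
}
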
        kvm_mmu_sync_roots(vcpu);
        if (r)
                goto out;
-       kvm_mmu_load_cr3(vcpu);
+       kvm_mmu_load_pgd(vcpu);
        kvm_x86_ops->tlb_flush(vcpu, true);
 out:
        return r;
 
 }
 STACK_FRAME_NON_STANDARD(svm_vcpu_run);
 
-static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
+static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        bool update_guest_cr3 = true;
        .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
-       .set_cr3 = svm_set_cr3,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
        .get_idt = svm_get_idt,
        .read_l1_tsc_offset = svm_read_l1_tsc_offset,
        .write_l1_tsc_offset = svm_write_l1_tsc_offset,
 
+       .load_mmu_pgd = svm_load_mmu_pgd,
+
        .check_intercept = svm_check_intercept,
        .handle_exit_irqoff = svm_handle_exit_irqoff,
 
 
                         * If L1 uses EPT, then L0 needs to execute INVEPT on
                         * EPTP02 instead of EPTP01. Therefore, delay TLB
                         * flush until vmcs02->eptp is fully updated by
-                        * KVM_REQ_LOAD_CR3. Note that this assumes
+                        * KVM_REQ_LOAD_MMU_PGD. Note that this assumes
                         * KVM_REQ_TLB_FLUSH is evaluated after
-                        * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
+                        * KVM_REQ_LOAD_MMU_PGD in vcpu_enter_guest().
                         */
                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                }
        /*
         * Immediately write vmcs02.GUEST_CR3.  It will be propagated to vmcs12
         * on nested VM-Exit, which can occur without actually running L2 and
-        * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with
+        * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
         * vmcs12.GUEST_ACTIVITY_STATE=HLT, in which case KVM will intercept the
         * transition to HLT instead of running L2.
         */
         *
         * If vmcs12 uses EPT, we need to execute this flush on EPTP01
         * and therefore we request the TLB flush to happen only after VMCS EPTP
-        * has been set by KVM_REQ_LOAD_CR3.
+        * has been set by KVM_REQ_LOAD_MMU_PGD.
         */
        if (enable_vpid &&
            (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
 
        return eptp;
 }
 
-void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
        struct kvm *kvm = vcpu->kvm;
        bool update_guest_cr3 = true;
        .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
        .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
        .set_cr0 = vmx_set_cr0,
-       .set_cr3 = vmx_set_cr3,
        .set_cr4 = vmx_set_cr4,
        .set_efer = vmx_set_efer,
        .get_idt = vmx_get_idt,
        .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
        .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
 
+       .load_mmu_pgd = vmx_load_mmu_pgd,
+
        .check_intercept = vmx_check_intercept,
        .handle_exit_irqoff = vmx_handle_exit_irqoff,
 
 
 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
 void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
+void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3);
 void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 
                }
                if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
                        kvm_mmu_sync_roots(vcpu);
-               if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu))
-                       kvm_mmu_load_cr3(vcpu);
+               if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
+                       kvm_mmu_load_pgd(vcpu);
                if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                        kvm_vcpu_flush_tlb(vcpu, true);
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
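
Note the ordering in the request loop above: KVM_REQ_LOAD_MMU_PGD is checked before KVM_REQ_TLB_FLUSH, which is exactly what the nested-EPT comment earlier relies on; a flush requested together with a root change must operate on the newly loaded root/EPTP. A standalone sketch of that ordering (toy model, not the kernel's request machinery; the slot numbers are illustrative):

/*
 * Standalone sketch, not the kernel's request machinery: pending
 * requests are consumed in a fixed order, with the MMU root load
 * handled before the TLB flush so a flush raised together with a
 * root change operates on the newly loaded root/EPTP.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REQ_LOAD_MMU_PGD	5	/* illustrative slot numbers */
#define REQ_TLB_FLUSH		6

static uint64_t pending = (1ull << REQ_LOAD_MMU_PGD) | (1ull << REQ_TLB_FLUSH);

static bool check_request(int req)
{
	if (pending & (1ull << req)) {		/* test and clear, like */
		pending &= ~(1ull << req);	/* kvm_check_request()  */
		return true;
	}
	return false;
}

int main(void)
{
	/* Same order as the loop above: load the new root first ... */
	if (check_request(REQ_LOAD_MMU_PGD))
		puts("load new MMU root (pgd/EPTP)");
	/* ... then flush, so the flush targets the root just loaded. */
	if (check_request(REQ_TLB_FLUSH))
		puts("flush TLB for the new root");
	return 0;
}
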