@@ ... @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
 }
 
 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
-                           u8 level, bool direct)
+                           u8 level)
 {
+       bool direct = vcpu->arch.mmu->root_role.direct;
        struct kvm_mmu_page *sp;
 
        sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
@@ ... @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
                mmu->root.hpa = root;
        } else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
-               root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
+               root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
                mmu->root.hpa = root;
        } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
                if (WARN_ON_ONCE(!mmu->pae_root)) {
@@ ... @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                        WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
 
                        root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
-                                             i << 30, PT32_ROOT_LEVEL, true);
+                                             i << 30, PT32_ROOT_LEVEL);
                        mmu->pae_root[i] = root | PT_PRESENT_MASK |
                                           shadow_me_value;
                }
@@ ... @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
         */
        if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
                root = mmu_alloc_root(vcpu, root_gfn, 0,
-                                     mmu->root_role.level, false);
+                                     mmu->root_role.level);
                mmu->root.hpa = root;
                goto set_root_pgd;
        }
@@ ... @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                }
 
                root = mmu_alloc_root(vcpu, root_gfn, i << 30,
-                                     PT32_ROOT_LEVEL, false);
+                                     PT32_ROOT_LEVEL);
                mmu->pae_root[i] = root | pm_mask;
        }