        struct kvm *kvm = vcpu->kvm;
        int i;
        LIST_HEAD(invalid_list);
-       bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
+       bool free_active_root;
 
        BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
 
        /* Before acquiring the MMU lock, see if we need to do any real work. */
-       if (!(free_active_root && VALID_PAGE(mmu->root.hpa))) {
+       free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
+               && VALID_PAGE(mmu->root.hpa);
+
+       if (!free_active_root) {
                for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                        if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
                            VALID_PAGE(mmu->prev_roots[i].hpa))
                                break;

                if (i == KVM_MMU_NUM_PREV_ROOTS)
                        return;
        }

        write_lock(&kvm->mmu_lock);

        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
                        mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
                                           &invalid_list);
 
        if (free_active_root) {
-               if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
-                   (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
+               if (to_shadow_page(mmu->root.hpa)) {
                        mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
                } else if (mmu->pae_root) {
                        for (i = 0; i < 4; ++i) {