PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
 }
 
-static void free_mmu_pages(struct kvm_vcpu *vcpu)
+static void free_mmu_pages(struct kvm_mmu *mmu)
 {
-       free_page((unsigned long)vcpu->arch.mmu->pae_root);
-       free_page((unsigned long)vcpu->arch.mmu->lm_root);
+       free_page((unsigned long)mmu->pae_root);
+       free_page((unsigned long)mmu->lm_root);
 }
 
-static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
+static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 {
        struct page *page;
        int i;
 
        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
        page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
        if (!page)
                return -ENOMEM;
 
-       vcpu->arch.mmu->pae_root = page_address(page);
+       mmu->pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
-               vcpu->arch.mmu->pae_root[i] = INVALID_PAGE;
+               mmu->pae_root[i] = INVALID_PAGE;
 
        return 0;
 }
 
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
        uint i;
+       int ret;
 
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
 
        vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
-       return alloc_mmu_pages(vcpu);
+
+       ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu);
+       if (ret)
+               return ret;
+
+       ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu);
+       if (ret)
+               goto fail_allocate_root;
+
+       return ret;
+ fail_allocate_root:
+       free_mmu_pages(&vcpu->arch.guest_mmu);
+       return ret;
 }
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
        kvm_mmu_unload(vcpu);
-       free_mmu_pages(vcpu);
+       free_mmu_pages(&vcpu->arch.root_mmu);
+       free_mmu_pages(&vcpu->arch.guest_mmu);
        mmu_free_memory_caches(vcpu);
 }