return max_level;
 }
 
-static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
-                                  struct kvm_page_fault *fault)
+static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
+                                      struct kvm_page_fault *fault)
 {
        int max_order, r;

[...]

        return RET_PF_CONTINUE;
 }
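
For context: the "return max_level;" at the top of this excerpt and the
max_order local suggest the private path converts the backing store's
allocation order into a maximum mapping level. A minimal sketch of that kind
of conversion follows; the helper name and the order thresholds (4KiB base
pages, so order 9 = 2MiB and order 18 = 1GiB) are illustrative assumptions,
not taken from this patch.

/*
 * Illustrative only: pick the largest mapping level whose page size
 * fits within a backing allocation of the given order.
 */
static int max_level_for_order(int max_order)
{
	if (max_order >= 18)	/* assumed 1GiB threshold */
		return 3;
	if (max_order >= 9)	/* assumed 2MiB threshold */
		return 2;
	return 1;		/* 4KiB mappings only */
}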
 
-static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
+                                struct kvm_page_fault *fault)
 {
        if (fault->is_private)
-               return kvm_faultin_pfn_private(vcpu, fault);
+               return kvm_mmu_faultin_pfn_private(vcpu, fault);
 
        fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
                                          fault->write, &fault->map_writable);
        return RET_PF_CONTINUE;
 }
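
The inner helper above is pure dispatch: private (guest_memfd-backed) faults
take a dedicated path, shared faults go through the host-MMU lookup, and
RET_PF_CONTINUE tells the caller to keep processing the fault. A
self-contained sketch of that dispatch-plus-sentinel pattern, with every
name invented for illustration:

#include <stdio.h>

/* Illustrative sentinels; only "continue" lets the caller proceed. */
enum fault_ret { PF_CONTINUE, PF_RETRY };

struct fault {
	int is_private;
	unsigned long pfn;
};

/* Private faults resolve through a dedicated backing store. */
static enum fault_ret faultin_private(struct fault *f)
{
	f->pfn = 0x1000;	/* stand-in for a guest_memfd lookup */
	return PF_CONTINUE;
}

/* The inner helper only routes; common checks live in the caller. */
static enum fault_ret faultin(struct fault *f)
{
	if (f->is_private)
		return faultin_private(f);

	f->pfn = 0x2000;	/* stand-in for a host-MMU lookup */
	return PF_CONTINUE;
}

int main(void)
{
	struct fault f = { .is_private = 1 };

	if (faultin(&f) == PF_CONTINUE)
		printf("resolved pfn 0x%lx\n", f.pfn);
	return 0;
}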
 
-static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-                          unsigned int access)
+static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
+                              struct kvm_page_fault *fault, unsigned int access)
 {
        struct kvm_memory_slot *slot = fault->slot;
        int ret;

[...]

        if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
                return RET_PF_RETRY;
 
-       ret = __kvm_faultin_pfn(vcpu, fault);
+       ret = __kvm_mmu_faultin_pfn(vcpu, fault);
        if (ret != RET_PF_CONTINUE)
                return ret;

[...]

        if (r)
                return r;
 
-       r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
+       r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
        if (r != RET_PF_CONTINUE)
                return r;
 
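
At the call site the contract is the same as inside the helper: anything
other than RET_PF_CONTINUE is propagated up (e.g. a retry that sends the
vCPU back through the fault path), and only RET_PF_CONTINUE lets the handler
go on to install the mapping. A tiny self-contained sketch of that
caller-side contract, names invented:

/* Illustrative caller: bail out on anything but the continue sentinel. */
enum fault_status { STATUS_CONTINUE, STATUS_RETRY };

static int resolve_pfn(void)
{
	return STATUS_CONTINUE;	/* stand-in for the pfn lookup above */
}

static int handle_page_fault(void)
{
	int r = resolve_pfn();

	if (r != STATUS_CONTINUE)
		return r;	/* e.g. retry: let the vCPU fault again */

	/* ... proceed to install the mapping ... */
	return 0;
}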