        __direct_pte_prefetch(vcpu, sp, sptep);
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-                       int map_writable, int level, gfn_t gfn, pfn_t pfn,
-                       bool prefault)
+static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
+                       int level, gfn_t gfn, pfn_t pfn, bool prefault)
 {
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
        make_mmu_pages_available(vcpu);
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
-       r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
-                        prefault);
+       r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
        spin_unlock(&vcpu->kvm->mmu_lock);
 
-
        return r;
 
 out_unlock:
        make_mmu_pages_available(vcpu);
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
-       r = __direct_map(vcpu, gpa, write, map_writable,
-                        level, gfn, pfn, prefault);
+       r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
        spin_unlock(&vcpu->kvm->mmu_lock);
 
        return r;
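/*
 * Not part of the patch: a minimal sketch, assuming the surrounding KVM MMU
 * types (struct kvm_vcpu, gfn_t, pfn_t). The dropped gpa argument ("v" at
 * the first call site above, "gpa" at the second) was never read inside
 * __direct_map, which walks the shadow page table from gfn and installs the
 * mapping for pfn there, so only the prototype and the callers change:
 *
 *	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn,
 *			 prefault);
 */
static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
			int level, gfn_t gfn, pfn_t pfn, bool prefault);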