* The fast path to get the writable pfn which will be stored in @pfn;
  * true indicates success, otherwise false is returned.
  */
-static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
-                           bool *writable, kvm_pfn_t *pfn)
+static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
 {
        struct page *page[1];
 
         * or the caller allows mapping a writable pfn for a read fault
         * request.
         */
-       if (!(write_fault || writable))
+       if (!((kfp->flags & FOLL_WRITE) || kfp->map_writable))
                return false;
 
-       if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
+       if (get_user_page_fast_only(kfp->hva, FOLL_WRITE, page)) {
                *pfn = page_to_pfn(page[0]);
-
-               if (writable)
-                       *writable = true;
+               if (kfp->map_writable)
+                       *kfp->map_writable = true;
                return true;
        }
 
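For context: this patch threads a single struct kvm_follow_pfn through the
pfn-resolution helpers in place of the old ad-hoc parameter lists. The
struct definition itself is not part of this excerpt; a minimal sketch,
inferred only from the fields used in these hunks (the real definition
elsewhere in the series may carry additional members), would be:

struct kvm_follow_pfn {
	const struct kvm_memory_slot *slot;	/* memslot to resolve @gfn against */
	gfn_t gfn;				/* guest frame number to map */
	unsigned long hva;			/* host VA, filled in by kvm_follow_pfn() */
	unsigned int flags;			/* FOLL_WRITE, FOLL_NOWAIT, FOLL_INTERRUPTIBLE, ... */
	bool *map_writable;			/* out: true if mapped writable; NULL if caller doesn't care */
};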
  * The slow path to get the pfn of the specified host virtual address;
  * 1 indicates success, -errno is returned if an error is detected.
  */
-static int hva_to_pfn_slow(unsigned long addr, bool no_wait, bool write_fault,
-                          bool interruptible, bool *writable, kvm_pfn_t *pfn)
+static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
 {
        /*
         * When a VCPU accesses a page that is not mapped into the secondary
         * Note that get_user_page_fast_only() and FOLL_WRITE for now
         * implicitly honor NUMA hinting faults and don't need this flag.
         */
-       unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
-       struct page *page;
+       unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT | kfp->flags;
+       struct page *page, *wpage;
        int npages;
 
-       if (writable)
-               *writable = write_fault;
-
-       if (write_fault)
-               flags |= FOLL_WRITE;
-       if (no_wait)
-               flags |= FOLL_NOWAIT;
-       if (interruptible)
-               flags |= FOLL_INTERRUPTIBLE;
-
-       npages = get_user_pages_unlocked(addr, 1, &page, flags);
+       npages = get_user_pages_unlocked(kfp->hva, 1, &page, flags);
        if (npages != 1)
                return npages;
 
-       /* map read fault as writable if possible */
-       if (unlikely(!write_fault) && writable) {
-               struct page *wpage;
+       if (!kfp->map_writable)
+               goto out;
 
-               if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
-                       *writable = true;
-                       put_page(page);
-                       page = wpage;
-               }
+       if (kfp->flags & FOLL_WRITE) {
+               *kfp->map_writable = true;
+               goto out;
        }
+
+       /* map read fault as writable if possible */
+       if (get_user_page_fast_only(kfp->hva, FOLL_WRITE, &wpage)) {
+               *kfp->map_writable = true;
+               put_page(page);
+               page = wpage;
+       }
+
+out:
        *pfn = page_to_pfn(page);
        return npages;
 }
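As a usage sketch of the map_writable contract implemented above
(hypothetical caller: the hva and install_spte() names are assumptions,
and the struct layout is the one sketched earlier), a read access that
can tolerate a writable mapping leaves FOLL_WRITE clear and supplies a
bool to fill in:

	bool writable = false;
	kvm_pfn_t pfn;
	struct kvm_follow_pfn kfp = {
		.hva = hva,			/* host VA of the guest page */
		.flags = 0,			/* read fault: FOLL_WRITE clear */
		.map_writable = &writable,	/* permit opportunistic writable map */
	};

	if (hva_to_pfn_slow(&kfp, &pfn) == 1)
		/* writable now reports whether the pfn is mapped writable */
		install_spte(pfn, writable);	/* hypothetical consumer */

If the FOLL_WRITE re-probe via get_user_page_fast_only() succeeds, the
caller receives a writable pfn even though the fault itself was a read.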
 
 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
-                              unsigned long addr, bool write_fault,
-                              bool *writable, kvm_pfn_t *p_pfn)
+                              struct kvm_follow_pfn *kfp, kvm_pfn_t *p_pfn)
 {
-       struct follow_pfnmap_args args = { .vma = vma, .address = addr };
+       struct follow_pfnmap_args args = { .vma = vma, .address = kfp->hva };
+       bool write_fault = kfp->flags & FOLL_WRITE;
        kvm_pfn_t pfn;
        int r;
 
                 * not call the fault handler, so do it here.
                 */
                bool unlocked = false;
-               r = fixup_user_fault(current->mm, addr,
+               r = fixup_user_fault(current->mm, kfp->hva,
                                     (write_fault ? FAULT_FLAG_WRITE : 0),
                                     &unlocked);
                if (unlocked)
                goto out;
        }
 
-       if (writable)
-               *writable = args.writable;
+       if (kfp->map_writable)
+               *kfp->map_writable = args.writable;
        pfn = args.pfn;
 
        /*
        return r;
 }
 
-/*
- * Pin guest page in memory and return its pfn.
- * @addr: host virtual address which maps memory to the guest
- * @interruptible: whether the process can be interrupted by non-fatal signals
- * @no_wait: whether or not this function need to wait IO complete if the
- *          host page is not in the memory
- * @write_fault: whether we should get a writable host page
- * @writable: whether it allows to map a writable host page for !@write_fault
- *
- * The function will map a writable host page for these two cases:
- * 1): @write_fault = true
- * 2): @write_fault = false && @writable, @writable will tell the caller
- *     whether the mapping is writable.
- */
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool no_wait,
-                    bool write_fault, bool *writable)
+kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp)
 {
        struct vm_area_struct *vma;
        kvm_pfn_t pfn;
 
        might_sleep();
 
-       if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
+       if (hva_to_pfn_fast(kfp, &pfn))
                return pfn;
 
-       npages = hva_to_pfn_slow(addr, no_wait, write_fault, interruptible,
-                                writable, &pfn);
+       npages = hva_to_pfn_slow(kfp, &pfn);
        if (npages == 1)
                return pfn;
        if (npages == -EINTR || npages == -EAGAIN)
 
        mmap_read_lock(current->mm);
 retry:
-       vma = vma_lookup(current->mm, addr);
+       vma = vma_lookup(current->mm, kfp->hva);
 
        if (vma == NULL)
                pfn = KVM_PFN_ERR_FAULT;
        else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
-               r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
+               r = hva_to_pfn_remapped(vma, kfp, &pfn);
                if (r == -EAGAIN)
                        goto retry;
                if (r < 0)
                        pfn = KVM_PFN_ERR_FAULT;
        } else {
-               if (no_wait && vma_is_valid(vma, write_fault))
+               if ((kfp->flags & FOLL_NOWAIT) &&
+                   vma_is_valid(vma, kfp->flags & FOLL_WRITE))
                        pfn = KVM_PFN_ERR_NEEDS_IO;
                else
                        pfn = KVM_PFN_ERR_FAULT;
        return pfn;
 }
 
-kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
-                              bool interruptible, bool no_wait,
-                              bool write_fault, bool *writable)
+static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp)
 {
-       unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
+       kfp->hva = __gfn_to_hva_many(kfp->slot, kfp->gfn, NULL,
+                                    kfp->flags & FOLL_WRITE);
 
-       if (kvm_is_error_hva(addr)) {
-               if (writable)
-                       *writable = false;
+       if (kfp->hva == KVM_HVA_ERR_RO_BAD)
+               return KVM_PFN_ERR_RO_FAULT;
 
-               return addr == KVM_HVA_ERR_RO_BAD ? KVM_PFN_ERR_RO_FAULT :
-                                                   KVM_PFN_NOSLOT;
-       }
+       if (kvm_is_error_hva(kfp->hva))
+               return KVM_PFN_NOSLOT;
 
-       /* Do not map writable pfn in the readonly memslot. */
-       if (writable && memslot_is_readonly(slot)) {
-               *writable = false;
-               writable = NULL;
+       if (memslot_is_readonly(kfp->slot) && kfp->map_writable) {
+               *kfp->map_writable = false;
+               kfp->map_writable = NULL;
        }
 
-       return hva_to_pfn(addr, interruptible, no_wait, write_fault, writable);
+       return hva_to_pfn(kfp);
+}
+
+kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
+                              bool interruptible, bool no_wait,
+                              bool write_fault, bool *writable)
+{
+       struct kvm_follow_pfn kfp = {
+               .slot = slot,
+               .gfn = gfn,
+               .map_writable = writable,
+       };
+
+       if (write_fault)
+               kfp.flags |= FOLL_WRITE;
+       if (no_wait)
+               kfp.flags |= FOLL_NOWAIT;
+       if (interruptible)
+               kfp.flags |= FOLL_INTERRUPTIBLE;
+
+       return kvm_follow_pfn(&kfp);
 }
 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
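For reference, a call through the legacy exported wrapper next to the
equivalent construction it performs internally (the local variables here
are illustrative only):

	bool writable;
	kvm_pfn_t pfn;

	/* Legacy API: interruptible write fault, report writability. */
	pfn = __gfn_to_pfn_memslot(slot, gfn, true, false, true, &writable);

	/* Equivalent to what the wrapper builds and hands to kvm_follow_pfn(): */
	struct kvm_follow_pfn kfp = {
		.slot = slot,
		.gfn = gfn,
		.flags = FOLL_WRITE | FOLL_INTERRUPTIBLE,
		.map_writable = &writable,
	};
	pfn = kvm_follow_pfn(&kfp);

Note that kvm_follow_pfn() is static, so code outside this file continues
to use the exported wrappers; the struct conversion stays internal.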
 
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable)
 {
-       return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
-                                   write_fault, writable);
+       struct kvm_follow_pfn kfp = {
+               .slot = gfn_to_memslot(kvm, gfn),
+               .gfn = gfn,
+               .flags = write_fault ? FOLL_WRITE : 0,
+               .map_writable = writable,
+       };
+
+       return kvm_follow_pfn(&kfp);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-       return __gfn_to_pfn_memslot(slot, gfn, false, false, true, NULL);
+       struct kvm_follow_pfn kfp = {
+               .slot = slot,
+               .gfn = gfn,
+               .flags = FOLL_WRITE,
+       };
+
+       return kvm_follow_pfn(&kfp);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);