         * can be used as guest memory but they are not managed by the host
         * kernel).
         */
+       /*
+        * Non-NULL if and only if the mapping elevated the refcount of the
+        * page backing 'pfn'; kvm_vcpu_unmap() must put that reference.
+        */
+       struct page *refcounted_page;
        struct page *page;
        void *hva;
        kvm_pfn_t pfn;
 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
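For context before the kvm_main.c hunks below: kvm_follow_pfn() consumes a
descriptor rather than a bare gfn. The following is a minimal sketch of that
structure, restricted to the fields this patch exercises; the full definition
(in virt/kvm/kvm_mm.h in this series) carries additional fields, and the
comments here are paraphrase, not authoritative documentation.

struct kvm_follow_pfn {
	const struct kvm_memory_slot *slot;	/* memslot to resolve @gfn against */
	gfn_t gfn;				/* guest frame number to look up */
	unsigned int flags;			/* FOLL_* flags, e.g. FOLL_WRITE */
	/*
	 * Optional output: on success, set to the struct page whose refcount
	 * was elevated during the lookup, or NULL if the pfn is not backed
	 * by a refcounted page (e.g. PFNMAP memory).
	 */
	struct page **refcounted_page;
};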
 
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
-{
-       if (dirty)
-               kvm_release_pfn_dirty(pfn);
-       else
-               kvm_release_pfn_clean(pfn);
-}
-
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
+       struct kvm_follow_pfn kfp = {
+               .slot = gfn_to_memslot(vcpu->kvm, gfn),
+               .gfn = gfn,
+               .flags = FOLL_WRITE,
+               .refcounted_page = &map->refcounted_page,
+       };
+
+       map->refcounted_page = NULL;
        map->page = NULL;
        map->hva = NULL;
        map->gfn = gfn;
 
-       map->pfn = gfn_to_pfn(vcpu->kvm, gfn);
+       map->pfn = kvm_follow_pfn(&kfp);
        if (is_error_noslot_pfn(map->pfn))
                return -EINVAL;
 
        if (dirty)
                kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
 
-       kvm_release_pfn(map->pfn, dirty);
+       /*
+        * Put the reference taken at map time; pfns without a refcounted
+        * struct page have nothing to release.
+        */
+       if (map->refcounted_page) {
+               if (dirty)
+                       kvm_release_page_dirty(map->refcounted_page);
+               else
+                       kvm_release_page_clean(map->refcounted_page);
+       }
 
        map->hva = NULL;
        map->page = NULL;
+       map->refcounted_page = NULL;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
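To make the resulting contract concrete, here is a hypothetical caller
(demo_write_guest_byte() is illustrative, not part of the patch):
kvm_vcpu_map() stashes any reference it takes in map->refcounted_page, and
kvm_vcpu_unmap() with dirty == true both marks the gfn dirty and puts that
reference via kvm_release_page_dirty().

static int demo_write_guest_byte(struct kvm_vcpu *vcpu, gfn_t gfn, u8 val)
{
	struct kvm_host_map map;
	int r;

	/* Grabs a reference on the backing page iff it is refcounted. */
	r = kvm_vcpu_map(vcpu, gfn, &map);
	if (r)
		return r;

	/* Write through the kernel mapping of the guest frame. */
	*(u8 *)map.hva = val;

	/*
	 * dirty == true: mark the gfn dirty and release map->refcounted_page
	 * (if any) as dirty rather than clean.
	 */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}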