        }
 }
 
-static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva)
+static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
 {
+       /*
+        * mn_active_invalidate_count acts for all intents and purposes
+        * like mmu_notifier_count here, but the latter cannot be used
+        * because the invalidation of caches in the mmu_notifier event
+        * occurs _before_ mmu_notifier_count is elevated.
+        *
+        * Note, it does not matter that mn_active_invalidate_count
+        * is not protected by gpc->lock.  It is guaranteed to
+        * be elevated before the mmu_notifier acquires gpc->lock, and
+        * isn't dropped until after mmu_notifier_seq is updated.
+        */
+       if (kvm->mn_active_invalidate_count)
+               return true;
+
+       /*
+        * Ensure mn_active_invalidate_count is read before
+        * mmu_notifier_seq.  This pairs with the smp_wmb() in
+        * mmu_notifier_invalidate_range_end() to guarantee either the
+        * old (non-zero) value of mn_active_invalidate_count or the
+        * new (incremented) value of mmu_notifier_seq is observed.
+        */
+       smp_rmb();
+       return kvm->mmu_notifier_seq != mmu_seq;
+}
+
+static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+{
+       /* Note, the new page offset may be different than the old! */
+       void *old_khva = gpc->khva - offset_in_page(gpc->khva);
+       kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
+       void *new_khva = NULL;
        unsigned long mmu_seq;
-       kvm_pfn_t new_pfn;
-       int retry;
+
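+       /*
+        * gpc->refresh_lock serializes this refresh against other tasks
+        * refreshing or unmapping the cache, while gpc->lock protects the
+        * cached fields against concurrent check() calls and mmu_notifier
+        * invalidation.
+        */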
+       lockdep_assert_held(&gpc->refresh_lock);
+
+       lockdep_assert_held_write(&gpc->lock);
+
+       /*
+        * Invalidate the cache prior to dropping gpc->lock; the gpa=>uhva
+        * assets have already been updated, so a concurrent check() from a
+        * different task could pass the gpa/uhva/generation checks even
+        * though the pfn is not yet (re)mapped.
+        */
+       gpc->valid = false;
 
        do {
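+               /*
+                * Snapshot the mmu_notifier sequence before faulting in the
+                * pfn; if an invalidation runs while gpc->lock is dropped,
+                * mmu_notifier_retry_cache() at the bottom of the loop will
+                * observe the elevated in-progress count or the bumped
+                * sequence and force another attempt.
+                */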
                mmu_seq = kvm->mmu_notifier_seq;
                smp_rmb();
 
+               write_unlock_irq(&gpc->lock);
+
+               /*
+                * If the previous iteration "failed" due to an mmu_notifier
+                * event, release the pfn and unmap the kernel virtual address
+                * from the previous attempt.  Unmapping might sleep, so this
+                * needs to be done after dropping the lock.  Opportunistically
+                * check for resched while the lock isn't held.
+                */
+               if (new_pfn != KVM_PFN_ERR_FAULT) {
+                       /*
+                        * Keep the mapping if the previous iteration reused
+                        * the existing mapping and didn't create a new one.
+                        */
+                       if (new_khva == old_khva)
+                               new_khva = NULL;
+
+                       gpc_release_pfn_and_khva(kvm, new_pfn, new_khva);
+
+                       cond_resched();
+               }
+
                /* We always request a writeable mapping */
-               new_pfn = hva_to_pfn(uhva, false, NULL, true, NULL);
+               new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
                if (is_error_noslot_pfn(new_pfn))
-                       break;
+                       goto out_error;
+
+               /*
+                * Obtain a new kernel mapping if KVM itself will access the
+                * pfn.  Note, kmap() and memremap() can both sleep, so this
+                * too must be done outside of gpc->lock!
+                */
+               if (gpc->usage & KVM_HOST_USES_PFN) {
+                       if (new_pfn == gpc->pfn) {
+                               new_khva = old_khva;
+                       } else if (pfn_valid(new_pfn)) {
+                               new_khva = kmap(pfn_to_page(new_pfn));
+#ifdef CONFIG_HAS_IOMEM
+                       } else {
+                               new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
+#endif
+                       }
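+                       /*
+                        * Bail if no usable kernel mapping was obtained, e.g.
+                        * if kmap() or memremap() failed, and drop the
+                        * reference acquired by hva_to_pfn() above.
+                        */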
+                       if (!new_khva) {
+                               kvm_release_pfn_clean(new_pfn);
+                               goto out_error;
+                       }
+               }
+
+               write_lock_irq(&gpc->lock);
 
-               KVM_MMU_READ_LOCK(kvm);
-               retry = mmu_notifier_retry_hva(kvm, mmu_seq, uhva);
-               KVM_MMU_READ_UNLOCK(kvm);
-               if (!retry)
-                       break;
+               /*
+                * Other tasks must wait for _this_ refresh to complete before
+                * attempting to refresh.
+                */
+               WARN_ON_ONCE(gpc->valid);
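+               /*
+                * Retry if an mmu_notifier invalidation ran, or is still
+                * running, while gpc->lock was dropped, as the pfn acquired
+                * above may already be stale.
+                */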
+       } while (mmu_notifier_retry_cache(kvm, mmu_seq));
 
-               cond_resched();
-       } while (1);
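+       /*
+        * Success: publish the new mapping under gpc->lock.  A later
+        * mmu_notifier invalidation of this range will clear gpc->valid via
+        * gfn_to_pfn_cache_invalidate_start().
+        */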
+       gpc->valid = true;
+       gpc->pfn = new_pfn;
+       gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);
+       return 0;
+
+out_error:
+       write_lock_irq(&gpc->lock);
 
-       return new_pfn;
+       return -EFAULT;
 }
 
 int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
        kvm_pfn_t old_pfn, new_pfn;
        unsigned long old_uhva;
        void *old_khva;
-       bool old_valid;
        int ret = 0;
 
        /*
        old_pfn = gpc->pfn;
        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_uhva = gpc->uhva;
-       old_valid = gpc->valid;
 
        /* If the userspace HVA is invalid, refresh that first */
        if (gpc->gpa != gpa || gpc->generation != slots->generation ||
                gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
 
                if (kvm_is_error_hva(gpc->uhva)) {
-                       gpc->pfn = KVM_PFN_ERR_FAULT;
                        ret = -EFAULT;
                        goto out;
                }
         * If the userspace HVA changed or the PFN was already invalid,
         * drop the lock and do the HVA to PFN lookup again.
         */
-       if (!old_valid || old_uhva != gpc->uhva) {
-               unsigned long uhva = gpc->uhva;
-               void *new_khva = NULL;
-
-               /* Placeholders for "hva is valid but not yet mapped" */
-               gpc->pfn = KVM_PFN_ERR_FAULT;
-               gpc->khva = NULL;
-               gpc->valid = true;
-
-               write_unlock_irq(&gpc->lock);
-
-               new_pfn = hva_to_pfn_retry(kvm, uhva);
-               if (is_error_noslot_pfn(new_pfn)) {
-                       ret = -EFAULT;
-                       goto map_done;
-               }
-
-               if (gpc->usage & KVM_HOST_USES_PFN) {
-                       if (new_pfn == old_pfn) {
-                               /*
-                                * Reuse the existing pfn and khva, but put the
-                                * reference acquired hva_to_pfn_retry(); the
-                                * cache still holds a reference to the pfn
-                                * from the previous refresh.
-                                */
-                               gpc_release_pfn_and_khva(kvm, new_pfn, NULL);
-
-                               new_khva = old_khva;
-                               old_pfn = KVM_PFN_ERR_FAULT;
-                               old_khva = NULL;
-                       } else if (pfn_valid(new_pfn)) {
-                               new_khva = kmap(pfn_to_page(new_pfn));
-#ifdef CONFIG_HAS_IOMEM
-                       } else {
-                               new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
-#endif
-                       }
-                       if (new_khva)
-                               new_khva += page_offset;
-                       else
-                               ret = -EFAULT;
-               }
-
-       map_done:
-               write_lock_irq(&gpc->lock);
-               if (ret) {
-                       gpc->valid = false;
-                       gpc->pfn = KVM_PFN_ERR_FAULT;
-                       gpc->khva = NULL;
-               } else {
-                       /* At this point, gpc->valid may already have been cleared */
-                       gpc->pfn = new_pfn;
-                       gpc->khva = new_khva;
-               }
+       if (!gpc->valid || old_uhva != gpc->uhva) {
+               ret = hva_to_pfn_retry(kvm, gpc);
        } else {
                /* If the HVA→PFN mapping was already valid, don't unmap it. */
                old_pfn = KVM_PFN_ERR_FAULT;
        }
 
  out:
+       /*
+        * Invalidate the cache and purge the pfn/khva if the refresh failed.
+        * Some/all of the uhva, gpa, and memslot generation info may still be
+        * valid; leave it as is.
+        */
+       if (ret) {
+               gpc->valid = false;
+               gpc->pfn = KVM_PFN_ERR_FAULT;
+               gpc->khva = NULL;
+       }
+
+       /* Snapshot the new pfn before dropping the lock! */
+       new_pfn = gpc->pfn;
+
        write_unlock_irq(&gpc->lock);
 
        mutex_unlock(&gpc->refresh_lock);
 
-       gpc_release_pfn_and_khva(kvm, old_pfn, old_khva);
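+       /*
+        * If the refresh installed a different pfn, release the previous pfn
+        * and kernel mapping now that gpc->lock has been dropped; unmapping
+        * may sleep.
+        */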
+       if (old_pfn != new_pfn)
+               gpc_release_pfn_and_khva(kvm, old_pfn, old_khva);
 
        return ret;
 }