{
        struct kvm_memslots *slots = kvm_memslots(kvm);
 
+       if (!gpc->active)
+               return false;
+
        if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
                return false;
 
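The new active check has to come before anything else in the check helper, because consumers only hold gpc->lock for read while touching the cached mapping and rely on this helper to tell them when to bail. A minimal sketch of that consumer loop, assuming the helper shown first is kvm_gfn_to_pfn_cache_check() (its refresh counterpart is named at the end of the refresh hunk below) and using the 6.1-era argument order; the wrapper function and its parameters are purely illustrative:

	/*
	 * Illustrative consumer, not part of the patch: "kvm", "gpc", "src"
	 * and "len" are assumed to come from the caller.
	 */
	static void example_write_cached_page(struct kvm *kvm,
					      struct gfn_to_pfn_cache *gpc,
					      const void *src, unsigned long len)
	{
		unsigned long flags;

		read_lock_irqsave(&gpc->lock, flags);
		while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, len)) {
			read_unlock_irqrestore(&gpc->lock, flags);

			/* Refresh can sleep, so it must not run under gpc->lock. */
			if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, len))
				return;	/* inactive cache or failed map, give up */

			read_lock_irqsave(&gpc->lock, flags);
		}

		memcpy(gpc->khva, src, len);	/* khva is stable while the lock is held */

		read_unlock_irqrestore(&gpc->lock, flags);
	}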
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        unsigned long page_offset = gpa & ~PAGE_MASK;
-       kvm_pfn_t old_pfn, new_pfn;
+       bool unmap_old = false;
        unsigned long old_uhva;
+       kvm_pfn_t old_pfn;
        void *old_khva;
-       int ret = 0;
+       int ret;
 
        /*
         * It must fit within a single page. The 'len' argument is
 
        write_lock_irq(&gpc->lock);
 
+       if (!gpc->active) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
        old_pfn = gpc->pfn;
        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_uhva = gpc->uhva;
                /* If the HVA→PFN mapping was already valid, don't unmap it. */
                old_pfn = KVM_PFN_ERR_FAULT;
                old_khva = NULL;
+               ret = 0;
        }
 
  out:
                gpc->khva = NULL;
        }
 
-       /* Snapshot the new pfn before dropping the lock! */
-       new_pfn = gpc->pfn;
+       /* Detect a pfn change before dropping the lock! */
+       unmap_old = (old_pfn != gpc->pfn);
 
+out_unlock:
        write_unlock_irq(&gpc->lock);
 
        mutex_unlock(&gpc->refresh_lock);
 
-       if (old_pfn != new_pfn)
+       if (unmap_old)
                gpc_unmap_khva(kvm, old_pfn, old_khva);
 
        return ret;
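Replacing the new_pfn snapshot with a pre-initialized unmap_old flag is what makes the early -EINVAL exit safe: that path bails out before old_pfn is ever read, so comparing pfns after the unlock (as the old code did) would act on an uninitialized value. The shape is the usual "decide under the lock, tear down after dropping it" pattern; a self-contained sketch of that pattern (illustrative only, not kernel code):

	struct example_cache {
		rwlock_t lock;
		void *mapping;
	};

	static void example_replace_mapping(struct example_cache *c, void *new_mapping,
					    void (*teardown)(void *))
	{
		void *stale = NULL;

		write_lock_irq(&c->lock);
		if (c->mapping != new_mapping) {
			stale = c->mapping;	/* snapshot while the lock is held */
			c->mapping = new_mapping;
		}
		write_unlock_irq(&c->lock);

		/* Teardown may sleep, so it runs only after the lock is dropped. */
		if (stale)
			teardown(stale);
	}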
                gpc->vcpu = vcpu;
                gpc->usage = usage;
                gpc->valid = false;
-               gpc->active = true;
 
                spin_lock(&kvm->gpc_lock);
                list_add(&gpc->list, &kvm->gpc_list);
                spin_unlock(&kvm->gpc_lock);
+
+               /*
+                * Activate the cache after adding it to the list; a concurrent
+                * refresh must not establish a mapping until the cache is
+                * reachable by mmu_notifier events.
+                */
+               write_lock_irq(&gpc->lock);
+               gpc->active = true;
+               write_unlock_irq(&gpc->lock);
        }
        return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
 }
 void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 {
        if (gpc->active) {
+               /*
+                * Deactivate the cache before removing it from the list; KVM
+                * must stall mmu_notifier events until all users go away, i.e.
+                * until gpc->lock is dropped and refresh is guaranteed to fail.
+                */
+               write_lock_irq(&gpc->lock);
+               gpc->active = false;
+               write_unlock_irq(&gpc->lock);
+
                spin_lock(&kvm->gpc_lock);
                list_del(&gpc->list);
                spin_unlock(&kvm->gpc_lock);
 
                kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
-               gpc->active = false;
        }
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
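For completeness, a hedged sketch of the lifecycle the two exported helpers sit in. The per-vCPU field and wrapper names are invented; the kvm_gpc_activate() argument order is as I recall the 6.1-era prototype (the parameters themselves are the ones visible in the activate body above), KVM_HOST_USES_PFN is the host-only pfn_cache_usage value from that era, and the cache is assumed to have had its locks set up earlier by the separate init helper, which is what makes taking gpc->lock inside activation/deactivation safe:

	/* Illustrative only; "example_cache" is a made-up per-vCPU field. */
	static int example_enable_shared_page(struct kvm_vcpu *vcpu, gpa_t gpa)
	{
		/*
		 * Assumes gpc->lock and gpc->refresh_lock were initialized when
		 * the vCPU was created, before any activation can race with a
		 * refresh or an mmu_notifier event.
		 */
		return kvm_gpc_activate(vcpu->kvm, &vcpu->arch.example_cache, vcpu,
					KVM_HOST_USES_PFN, gpa, PAGE_SIZE);
	}

	static void example_disable_shared_page(struct kvm_vcpu *vcpu)
	{
		/* Fine to call even if activation never happened: gpc->active is checked. */
		kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.example_cache);
	}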