www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: pfncache: check the need for invalidation under read lock first
Author: Paul Durrant <pdurrant@amazon.com>
Thu, 15 Feb 2024 15:29:14 +0000 (15:29 +0000)
Committer: Sean Christopherson <seanjc@google.com>
Thu, 22 Feb 2024 15:01:20 +0000 (07:01 -0800)
When processing mmu_notifier invalidations for gpc caches, pre-check for
overlap with the invalidation event while holding gpc->lock for read, and
only take gpc->lock for write if the cache needs to be invalidated.  Doing
a pre-check without taking gpc->lock for write avoids unnecessarily
contending the lock for unrelated invalidations, which is very beneficial
for caches that are heavily used (but rarely subjected to mmu_notifier
invalidations).

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-20-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
virt/kvm/pfncache.c

index a47ca6fd75c27b735ae388aa67884b7f18397841..9ac8c9da4eda115a35462ff6cc84dc9d31fa6767 100644 (file)
@@ -29,14 +29,30 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 
        spin_lock(&kvm->gpc_lock);
        list_for_each_entry(gpc, &kvm->gpc_list, list) {
-               write_lock_irq(&gpc->lock);
+               read_lock_irq(&gpc->lock);
 
                /* Only a single page so no need to care about length */
                if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                    gpc->uhva >= start && gpc->uhva < end) {
-                       gpc->valid = false;
+                       read_unlock_irq(&gpc->lock);
+
+                       /*
+                        * There is a small window here where the cache could
+                        * be modified, and invalidation would no longer be
+                        * necessary. Hence check again whether invalidation
+                        * is still necessary once the write lock has been
+                        * acquired.
+                        */
+
+                       write_lock_irq(&gpc->lock);
+                       if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
+                           gpc->uhva >= start && gpc->uhva < end)
+                               gpc->valid = false;
+                       write_unlock_irq(&gpc->lock);
+                       continue;
                }
-               write_unlock_irq(&gpc->lock);
+
+               read_unlock_irq(&gpc->lock);
        }
        spin_unlock(&kvm->gpc_lock);
 }