KVM: x86/mmu: Honor NEED_RESCHED when zapping rmaps and blocking is allowed
author    Sean Christopherson <seanjc@google.com>
          Fri, 9 Aug 2024 19:43:25 +0000 (12:43 -0700)
committer Sean Christopherson <seanjc@google.com>
          Tue, 10 Sep 2024 03:22:04 +0000 (20:22 -0700)
Convert kvm_unmap_gfn_range(), which is the helper that zaps rmap SPTEs in
response to an mmu_notifier invalidation, to use __kvm_rmap_zap_gfn_range()
and feed in range->may_block.  In other words, honor NEED_RESCHED by way of
cond_resched() when zapping rmaps.  This fixes a long-standing issue where
KVM could process an absurd number of rmap entries without ever yielding,
e.g. if an mmu_notifier fired on a PUD (or larger) range.
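
For illustration, a minimal sketch of the yielding pattern this enables
(simplified; the real logic lives in __walk_slot_rmaps(), and
zap_one_rmap() here is a hypothetical stand-in for the per-rmap zap
callback, not an actual kernel helper):

  static bool zap_range_yielding(struct kvm *kvm, gfn_t start, gfn_t end,
                                 bool can_yield)
  {
          bool flush = false;
          gfn_t gfn;

          for (gfn = start; gfn < end; gfn++) {
                  flush |= zap_one_rmap(kvm, gfn);        /* hypothetical */

                  /* Yield only if the caller allows blocking. */
                  if (can_yield &&
                      (need_resched() || rwlock_needbreak(&kvm->mmu_lock))) {
                          /*
                           * Flush before dropping mmu_lock so that other
                           * tasks never observe stale, zapped SPTEs.
                           */
                          if (flush) {
                                  kvm_flush_remote_tlbs(kvm);
                                  flush = false;
                          }
                          cond_resched_rwlock_write(&kvm->mmu_lock);
                  }
          }
          return flush;
  }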

Opportunistically rename __kvm_zap_rmap() to kvm_zap_rmap(), and drop the
old kvm_zap_rmap().  Ideally, the shuffling would be done in a different
patch, but that just makes the compiler unhappy, e.g.

  arch/x86/kvm/mmu/mmu.c:1462:13: error: ‘kvm_zap_rmap’ defined but not used

Reported-by: Peter Xu <peterx@redhat.com>
Link: https://lore.kernel.org/r/20240809194335.1726916-14-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 70b043d7701dddc32eab2338d2fd705a74aecf97..27a8a4f486c5c8ab0364f12a9b66665a7fe7fe46 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1435,16 +1435,10 @@ static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
        return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
 }
 
-static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-                          const struct kvm_memory_slot *slot)
-{
-       return kvm_zap_all_rmap_sptes(kvm, rmap_head);
-}
-
 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-                        struct kvm_memory_slot *slot, gfn_t gfn, int level)
+                        const struct kvm_memory_slot *slot)
 {
-       return __kvm_zap_rmap(kvm, rmap_head, slot);
+       return kvm_zap_all_rmap_sptes(kvm, rmap_head);
 }
 
 struct slot_rmap_walk_iterator {
@@ -1578,7 +1572,7 @@ static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
                                     gfn_t start, gfn_t end, bool can_yield,
                                     bool flush)
 {
-       return __walk_slot_rmaps(kvm, slot, __kvm_zap_rmap,
+       return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap,
                                 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
                                 start, end - 1, can_yield, true, flush);
 }
@@ -1607,7 +1601,9 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
        bool flush = false;
 
        if (kvm_memslots_have_rmaps(kvm))
-               flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
+               flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
+                                                range->start, range->end,
+                                                range->may_block, flush);
 
        if (tdp_mmu_enabled)
                flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
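
For context (not part of this diff): range->may_block originates in KVM's
common mmu_notifier glue. A paraphrased, abbreviated sketch of how
virt/kvm/kvm_main.c derives the flag; only the relevant fields are shown:

  /*
   * may_block mirrors whether the primary MMU permits this invalidation
   * to sleep, which is what makes it safe for the rmap walk above to
   * cond_resched().
   */
  const struct kvm_mmu_notifier_range hva_range = {
          .start     = range->start,
          .end       = range->end,
          .handler   = kvm_mmu_unmap_gfn_range,
          .may_block = mmu_notifier_range_blockable(range),
  };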