www.infradead.org Git - users/willy/xarray.git/commitdiff
KVM: pfncache: remove KVM_GUEST_USES_PFN usage
author Paul Durrant <pdurrant@amazon.com>
Thu, 15 Feb 2024 15:29:00 +0000 (15:29 +0000)
committer Sean Christopherson <seanjc@google.com>
Tue, 20 Feb 2024 15:37:43 +0000 (07:37 -0800)
As noted in [1] the KVM_GUEST_USES_PFN usage flag is never set by any
callers of kvm_gpc_init(), and for good reason: the implementation is
incomplete/broken.  And it's not clear that there will ever be a user of
KVM_GUEST_USES_PFN, as coordinating vCPUs with mmu_notifier events is
non-trivial.

Remove KVM_GUEST_USES_PFN and all related code, e.g. dropping
KVM_GUEST_USES_PFN also makes the 'vcpu' argument redundant, to avoid
having to reason about broken code as __kvm_gpc_refresh() evolves.

Moreover, all existing callers specify KVM_HOST_USES_PFN so the usage
check in hva_to_pfn_retry() and hence the 'usage' argument to
kvm_gpc_init() are also redundant.

[1] https://lore.kernel.org/all/ZQiR8IpqOZrOpzHC@google.com

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-6-paul@xen.org
[sean: explicitly call out that guest usage is incomplete]
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
include/linux/kvm_host.h
include/linux/kvm_types.h
virt/kvm/pfncache.c

index f0f37c769a3afa40f7536f80451584fedc73770f..415723a28dcec7c16f900a8f5a5778da6a840f14 100644 (file)
@@ -12056,7 +12056,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        vcpu->arch.regs_avail = ~0;
        vcpu->arch.regs_dirty = ~0;
 
-       kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
+       kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
 
        if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
index 2d001a9c63787e0f58781cfbf8d48d9f3ef12533..e904642254677cdfb2c6bbef1bb0ffb3c307b483 100644 (file)
@@ -2108,14 +2108,10 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
 
        timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
 
-       kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
-                    KVM_HOST_USES_PFN);
-       kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
-                    KVM_HOST_USES_PFN);
-       kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
-                    KVM_HOST_USES_PFN);
-       kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
-                    KVM_HOST_USES_PFN);
+       kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm);
+       kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm);
+       kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm);
+       kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm);
 }
 
 void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
@@ -2158,7 +2154,7 @@ void kvm_xen_init_vm(struct kvm *kvm)
 {
        mutex_init(&kvm->arch.xen.xen_lock);
        idr_init(&kvm->arch.xen.evtchn_ports);
-       kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
+       kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm);
 }
 
 void kvm_xen_destroy_vm(struct kvm *kvm)
index 604ae285d9a993fb55d317137d20523dece4b7d1..3e1c04608c67d4a63dafb2be2c1f6db8f59c40ec 100644 (file)
@@ -1319,21 +1319,12 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  *
  * @gpc:          struct gfn_to_pfn_cache object.
  * @kvm:          pointer to kvm instance.
- * @vcpu:         vCPU to be used for marking pages dirty and to be woken on
- *                invalidation.
- * @usage:        indicates if the resulting host physical PFN is used while
- *                the @vcpu is IN_GUEST_MODE (in which case invalidation of 
- *                the cache from MMU notifiers---but not for KVM memslot
- *                changes!---will also force @vcpu to exit the guest and
- *                refresh the cache); and/or if the PFN used directly
- *                by KVM (and thus needs a kernel virtual mapping).
  *
  * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
  * immutable attributes.  Note, the cache must be zero-allocated (or zeroed by
  * the caller before init).
  */
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
-                 struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
 
 /**
  * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
index 9d1f7835d8c13917ad171297752e072c04bec1b3..d93f6522b2c34c2e5e33b80266f43cb2f7775bde 100644 (file)
@@ -49,12 +49,6 @@ typedef u64            hfn_t;
 
 typedef hfn_t kvm_pfn_t;
 
-enum pfn_cache_usage {
-       KVM_GUEST_USES_PFN = BIT(0),
-       KVM_HOST_USES_PFN  = BIT(1),
-       KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
-};
-
 struct gfn_to_hva_cache {
        u64 generation;
        gpa_t gpa;
@@ -69,13 +63,11 @@ struct gfn_to_pfn_cache {
        unsigned long uhva;
        struct kvm_memory_slot *memslot;
        struct kvm *kvm;
-       struct kvm_vcpu *vcpu;
        struct list_head list;
        rwlock_t lock;
        struct mutex refresh_lock;
        void *khva;
        kvm_pfn_t pfn;
-       enum pfn_cache_usage usage;
        bool active;
        bool valid;
 };
index f3571f44d9af143bbf5856e63fc09c73e43e25a3..6f4b537eb25b1dd57255f903f0599b44ab1a1758 100644 (file)
@@ -25,9 +25,7 @@
 void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
                                       unsigned long end, bool may_block)
 {
-       DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
        struct gfn_to_pfn_cache *gpc;
-       bool evict_vcpus = false;
 
        spin_lock(&kvm->gpc_lock);
        list_for_each_entry(gpc, &kvm->gpc_list, list) {
@@ -37,43 +35,10 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
                if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                    gpc->uhva >= start && gpc->uhva < end) {
                        gpc->valid = false;
-
-                       /*
-                        * If a guest vCPU could be using the physical address,
-                        * it needs to be forced out of guest mode.
-                        */
-                       if (gpc->usage & KVM_GUEST_USES_PFN) {
-                               if (!evict_vcpus) {
-                                       evict_vcpus = true;
-                                       bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
-                               }
-                               __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
-                       }
                }
                write_unlock_irq(&gpc->lock);
        }
        spin_unlock(&kvm->gpc_lock);
-
-       if (evict_vcpus) {
-               /*
-                * KVM needs to ensure the vCPU is fully out of guest context
-                * before allowing the invalidation to continue.
-                */
-               unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
-               bool called;
-
-               /*
-                * If the OOM reaper is active, then all vCPUs should have
-                * been stopped already, so perform the request without
-                * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
-                */
-               if (!may_block)
-                       req &= ~KVM_REQUEST_WAIT;
-
-               called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
-
-               WARN_ON_ONCE(called && !may_block);
-       }
 }
 
 bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
@@ -206,16 +171,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
                 * pfn.  Note, kmap() and memremap() can both sleep, so this
                 * too must be done outside of gpc->lock!
                 */
-               if (gpc->usage & KVM_HOST_USES_PFN) {
-                       if (new_pfn == gpc->pfn)
-                               new_khva = old_khva;
-                       else
-                               new_khva = gpc_map(new_pfn);
-
-                       if (!new_khva) {
-                               kvm_release_pfn_clean(new_pfn);
-                               goto out_error;
-                       }
+               if (new_pfn == gpc->pfn)
+                       new_khva = old_khva;
+               else
+                       new_khva = gpc_map(new_pfn);
+
+               if (!new_khva) {
+                       kvm_release_pfn_clean(new_pfn);
+                       goto out_error;
                }
 
                write_lock_irq(&gpc->lock);
@@ -346,18 +309,12 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
        return __kvm_gpc_refresh(gpc, gpc->gpa, len);
 }
 
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
-                 struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
 {
-       WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
-       WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
-
        rwlock_init(&gpc->lock);
        mutex_init(&gpc->refresh_lock);
 
        gpc->kvm = kvm;
-       gpc->vcpu = vcpu;
-       gpc->usage = usage;
        gpc->pfn = KVM_PFN_ERR_FAULT;
        gpc->uhva = KVM_HVA_ERR_BAD;
 }