www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: Use enum to track if cached PFN will be used in guest and/or host
author: Sean Christopherson <seanjc@google.com>
Fri, 18 Feb 2022 19:45:47 +0000 (11:45 -0800)
committer: David Woodhouse <dwmw@amazon.co.uk>
Mon, 28 Feb 2022 19:35:41 +0000 (19:35 +0000)
Replace the guest_uses_pa and kernel_map booleans in the PFN cache code
with a unified enum/bitmask. Using explicit names makes it easier to
review and audit call sites.

Opportunistically add a WARN to prevent passing garbage; instantiating a
cache without declaring its usage is either buggy or pointless.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
arch/x86/kvm/xen.c
include/linux/kvm_host.h
include/linux/kvm_types.h
virt/kvm/pfncache.c

index 4aa0f2b3166507521e23cf224c1604afeb1eca07..5be1c92271058f6075368f20af7baed82176bdfe 100644 (file)
@@ -39,7 +39,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
        }
 
        do {
-               ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
+               ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
                                                gpa, PAGE_SIZE, false);
                if (ret)
                        goto out;
index f11039944c08ffd7d5bfe6675f95c8b9f2d3dd86..d044e328046a6fb331573f2a7f1dd5762c08d302 100644 (file)
@@ -1222,9 +1222,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * @gpc:          struct gfn_to_pfn_cache object.
  * @vcpu:         vCPU to be used for marking pages dirty and to be woken on
  *                invalidation.
- * @guest_uses_pa: indicates that the resulting host physical PFN is used while
- *                @vcpu is IN_GUEST_MODE so invalidations should wake it.
- * @kernel_map:    requests a kernel virtual mapping (kmap / memremap).
+ * @usage:        indicates if the resulting host physical PFN is used while
+ *                the @vcpu is IN_GUEST_MODE and/or if the PFN is used
+ *                directly by KVM (and thus needs a kernel virtual mapping).
  * @gpa:          guest physical address to map.
  * @len:          sanity check; the range being access must fit a single page.
  * @dirty:         mark the cache dirty immediately.
@@ -1240,9 +1240,8 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * to ensure that the cache is valid before accessing the target page.
  */
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                             struct kvm_vcpu *vcpu, bool guest_uses_pa,
-                             bool kernel_map, gpa_t gpa, unsigned long len,
-                             bool dirty);
+                             struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+                             gpa_t gpa, unsigned long len, bool dirty);
 
 /**
  * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
index dceac12c1ce571937a68cd90dc3181102183b59b..784f37cbf33e63db5bd8d909ca8ea981c027287b 100644 (file)
@@ -18,6 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
+#include <linux/bits.h>
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
 
@@ -46,6 +47,12 @@ typedef u64            hfn_t;
 
 typedef hfn_t kvm_pfn_t;
 
+enum pfn_cache_usage {
+       KVM_GUEST_USES_PFN = BIT(0),
+       KVM_HOST_USES_PFN  = BIT(1),
+       KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
+};
+
 struct gfn_to_hva_cache {
        u64 generation;
        gpa_t gpa;
@@ -64,11 +71,10 @@ struct gfn_to_pfn_cache {
        rwlock_t lock;
        void *khva;
        kvm_pfn_t pfn;
+       enum pfn_cache_usage usage;
        bool active;
        bool valid;
        bool dirty;
-       bool kernel_map;
-       bool guest_uses_pa;
 };
 
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
index ce878f4be4daab2dc5e1677b2a3c3edd760959e9..9b3a192cb18c9e3836dd584f54a8a7ccdc280389 100644 (file)
@@ -42,7 +42,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
                         * If a guest vCPU could be using the physical address,
                         * it needs to be woken.
                         */
-                       if (gpc->guest_uses_pa) {
+                       if (gpc->usage & KVM_GUEST_USES_PFN) {
                                if (!wake_vcpus) {
                                        wake_vcpus = true;
                                        bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
@@ -219,7 +219,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                        goto map_done;
                }
 
-               if (gpc->kernel_map) {
+               if (gpc->usage & KVM_HOST_USES_PFN) {
                        if (new_pfn == old_pfn) {
                                new_khva = old_khva;
                                old_pfn = KVM_PFN_ERR_FAULT;
@@ -299,10 +299,11 @@ EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
 
 
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                             struct kvm_vcpu *vcpu, bool guest_uses_pa,
-                             bool kernel_map, gpa_t gpa, unsigned long len,
-                             bool dirty)
+                             struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+                             gpa_t gpa, unsigned long len, bool dirty)
 {
+       WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+
        if (!gpc->active) {
                rwlock_init(&gpc->lock);
 
@@ -310,8 +311,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->uhva = KVM_HVA_ERR_BAD;
                gpc->vcpu = vcpu;
-               gpc->kernel_map = kernel_map;
-               gpc->guest_uses_pa = guest_uses_pa;
+               gpc->usage = usage;
                gpc->valid = false;
                gpc->active = true;