KVM: x86: Support write protecting only large pages
author     Keqian Zhu <zhukeqian1@huawei.com>
Thu, 29 Apr 2021 03:41:14 +0000 (11:41 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
Thu, 17 Jun 2021 17:09:25 +0000 (13:09 -0400)
Prepare for lazily write protecting large pages during dirty log tracking,
for which we will only need to write protect gfns at large-page
granularity.

No functional or performance change expected.

Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Message-Id: <20210429034115.35560-2-zhukeqian1@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
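
For illustration only, a minimal sketch (not part of this patch) of how a caller
could use the new min_level argument to write protect a gfn at 2MiB granularity
and above while leaving its 4KiB SPTEs writable. The helper name
wrprot_gfn_large_only is hypothetical; the real lazy dirty-log caller is added
by a follow-up patch.

/*
 * Hypothetical example: write protect only the large-page (2MiB and 1GiB)
 * mappings of @gfn, leaving any 4KiB mapping writable so it can be write
 * protected lazily later.
 *
 * The caller is assumed to hold kvm->mmu_lock for write, as the existing
 * callers of kvm_mmu_slot_gfn_write_protect() do.
 */
static bool wrprot_gfn_large_only(struct kvm *kvm,
                                  struct kvm_memory_slot *slot, gfn_t gfn)
{
        /* PG_LEVEL_2M skips the PG_LEVEL_4K rmaps and TDP MMU leaf SPTEs. */
        return kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_2M);
}

Passing PG_LEVEL_4K, as the existing callers converted below do, preserves the
previous behaviour of write protecting every mapping level.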
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/page_track.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8ac1b9c935fee82d6d6cd8438342e9d834c6ebbd..a668d2050b7955cf1114124eef94038804b3d998 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1249,20 +1249,21 @@ int kvm_cpu_dirty_log_size(void)
 }
 
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-                                   struct kvm_memory_slot *slot, u64 gfn)
+                                   struct kvm_memory_slot *slot, u64 gfn,
+                                   int min_level)
 {
        struct kvm_rmap_head *rmap_head;
        int i;
        bool write_protected = false;
 
-       for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+       for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
                rmap_head = __gfn_to_rmap(gfn, i, slot);
                write_protected |= __rmap_write_protect(kvm, rmap_head, true);
        }
 
        if (is_tdp_mmu_enabled(kvm))
                write_protected |=
-                       kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
+                       kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
 
        return write_protected;
 }
@@ -1272,7 +1273,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
        struct kvm_memory_slot *slot;
 
        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-       return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+       return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
 }
 
 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index ff4c6256f3f9e5d3dbfc46f336def52c755069fb..18be103df9d59f2c0984e98b8c5bc39cfa30761d 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -128,7 +128,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-                                   struct kvm_memory_slot *slot, u64 gfn);
+                                   struct kvm_memory_slot *slot, u64 gfn,
+                                   int min_level);
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
                                        u64 start_gfn, u64 pages);
 
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 34bb0ec69bd8b420215c374c87c6d10858c0dd43..91a9f7e0fd91484bfbcfbcfdb504024d557c56fc 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -100,7 +100,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
        kvm_mmu_gfn_disallow_lpage(slot, gfn);
 
        if (mode == KVM_PAGE_TRACK_WRITE)
-               if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+               if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
                        kvm_flush_remote_tlbs(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 237317b1eddda3c4f15136f54f6e61e9e0c9a49c..6b6dfcdcb17979c8e590293a2b0675edf4edcaeb 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1462,15 +1462,22 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
  * Returns true if an SPTE was set and a TLB flush is needed.
  */
 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
-                             gfn_t gfn)
+                             gfn_t gfn, int min_level)
 {
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;
 
+       BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
+
        rcu_read_lock();
 
-       tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
+       for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+                                  min_level, gfn, gfn + 1) {
+               if (!is_shadow_present_pte(iter.old_spte) ||
+                   !is_last_spte(iter.old_spte, iter.level))
+                       continue;
+
                if (!is_writable_pte(iter.old_spte))
                        break;
 
@@ -1492,14 +1499,15 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
  * Returns true if an SPTE was set and a TLB flush is needed.
  */
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-                                  struct kvm_memory_slot *slot, gfn_t gfn)
+                                  struct kvm_memory_slot *slot, gfn_t gfn,
+                                  int min_level)
 {
        struct kvm_mmu_page *root;
        bool spte_set = false;
 
        lockdep_assert_held_write(&kvm->mmu_lock);
        for_each_tdp_mmu_root(kvm, root, slot->as_id)
-               spte_set |= write_protect_gfn(kvm, root, gfn);
+               spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
 
        return spte_set;
 }
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 5fdf630904517a7cc2281cc0206741e968f5459d..a861570fcd7cd09332c51c1d2f2c9ed78a42dff1 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -74,7 +74,8 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                       bool flush);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-                                  struct kvm_memory_slot *slot, gfn_t gfn);
+                                  struct kvm_memory_slot *slot, gfn_t gfn,
+                                  int min_level);
 
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
                         int *root_level);