Count the pages used by the KVM MMU on x86 in the secondary pagetable
memory stats (e.g. "SecPageTables" in /proc/meminfo) to give better
visibility into KVM MMU memory consumption, similar to how normal user
page tables are accounted.
Add the inner helper to common KVM code; ARM will also use it to count
stats in a future commit.
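
As a rough sketch (not part of this patch), an architecture that
allocates its page-table pages directly could use the common helper as
below; the arch_*() names are purely illustrative, only
kvm_account_pgtable_pages() is added here:

  static void *arch_alloc_pgtable_page(void)
  {
          void *virt = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);

          /* Charge the new page-table page to NR_SECONDARY_PAGETABLE. */
          if (virt)
                  kvm_account_pgtable_pages(virt, +1);
          return virt;
  }

  static void arch_free_pgtable_page(void *virt)
  {
          /* Uncharge the page before freeing it. */
          kvm_account_pgtable_pages(virt, -1);
          free_page((unsigned long)virt);
  }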
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Acked-by: Marc Zyngier <maz@kernel.org> # generic KVM changes
Link: https://lore.kernel.org/r/20220823004639.2387269-3-yosryahmed@google.com
Link: https://lore.kernel.org/r/20220823004639.2387269-4-yosryahmed@google.com
[sean: squash x86 usage to work around modpost issues]
Signed-off-by: Sean Christopherson <seanjc@google.com>
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
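+/*
+ * Account/unaccount a shadow MMU page in both KVM's own used-pages count and
+ * the system-wide NR_SECONDARY_PAGETABLE stat ("SecPageTables" in
+ * /proc/meminfo).
+ */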
+static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       kvm_mod_used_mmu_pages(kvm, +1);
+       kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       kvm_mod_used_mmu_pages(kvm, -1);
+       kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
 {
        MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
         */
        sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
        list_add(&sp->link, &kvm->arch.active_mmu_pages);
-       kvm_mod_used_mmu_pages(kvm, +1);
+       kvm_account_mmu_page(kvm, sp);
 
        sp->gfn = gfn;
        sp->role = role;
                        list_add(&sp->link, invalid_list);
                else
                        list_move(&sp->link, invalid_list);
-               kvm_mod_used_mmu_pages(kvm, -1);
+               kvm_unaccount_mmu_page(kvm, sp);
        } else {
                /*
                 * Remove the active root from the active page list, the root
 
        }
 }
 
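+/*
+ * TDP MMU pages aren't tracked via kvm_mod_used_mmu_pages(), so only the
+ * NR_SECONDARY_PAGETABLE stat needs to be updated here.
+ */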
+static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
 /**
  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
  *
 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
                              bool shared)
 {
+       tdp_unaccount_mmu_page(kvm, sp);
        if (shared)
                spin_lock(&kvm->arch.tdp_mmu_pages_lock);
        else
        if (account_nx)
                account_huge_nx_page(kvm, sp);
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+       tdp_account_mmu_page(kvm, sp);
 
        return 0;
 }
 
 }
 #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
 
+/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+       mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
+
 /*
  * This defines how many reserved entries we want to keep before we
  * kick the vcpu to the userspace to avoid dirty ring full.  This