* state bits, it is used to clear the last level sptep.
* Returns the old PTE.
*/
-static u64 mmu_spte_clear_track_bits(u64 *sptep)
+static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
{
kvm_pfn_t pfn;
u64 old_spte = *sptep;
+ int level = sptep_to_sp(sptep)->role.level;
if (!spte_has_volatile_bits(old_spte))
__update_clear_spte_fast(sptep, 0ull);
if (!is_shadow_present_pte(old_spte))
return old_spte;
+ kvm_update_page_stats(kvm, level, -1);
+
pfn = spte_to_pfn(old_spte);
/*
}
}
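
The new accounting funnels through kvm_update_page_stats(), which is not shown in this excerpt. A minimal sketch of what that helper is expected to look like, assuming the per-level counters are backed by an atomic64_t array indexed by level (see the stats descriptor hunk at the end):

static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	/*
	 * Sketch only: pages[] is assumed to alias pages_4k/pages_2m/pages_1g,
	 * so levels PG_LEVEL_4K..PG_LEVEL_1G map to indices 0..2.
	 */
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}

Because the decrement now lives in mmu_spte_clear_track_bits(), every caller below only needs to pass kvm through; the open-coded lpages updates become redundant and are removed.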
-static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
+static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ u64 *sptep)
{
- mmu_spte_clear_track_bits(sptep);
+ mmu_spte_clear_track_bits(kvm, sptep);
__pte_list_remove(sptep, rmap_head);
}
/* Return true if rmap existed, false otherwise */
-static bool pte_list_destroy(struct kvm_rmap_head *rmap_head)
+static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc, *next;
int i;
return false;
if (!(rmap_head->val & 1)) {
- mmu_spte_clear_track_bits((u64 *)rmap_head->val);
+ mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
goto out;
}
for (; desc; desc = next) {
for (i = 0; i < desc->spte_count; i++)
- mmu_spte_clear_track_bits(desc->sptes[i]);
+ mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
next = desc->more;
mmu_free_pte_list_desc(desc);
}
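
For context, the rmap descriptor walked above is roughly shaped as below; the field names come from the loop itself, while the array size and exact types are assumptions of this sketch:

struct pte_list_desc {
	struct pte_list_desc *more;	/* next descriptor in the chain */
	u64 spte_count;			/* number of valid entries in sptes[] */
	u64 *sptes[PTE_LIST_EXT];	/* back-pointers to parent SPTEs */
};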
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
- u64 old_spte = mmu_spte_clear_track_bits(sptep);
+ u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
if (is_shadow_present_pte(old_spte))
rmap_remove(kvm, sptep);
if (is_large_pte(*sptep)) {
WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
drop_spte(kvm, sptep);
- --kvm->stat.lpages;
return true;
}
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot)
{
- return pte_list_destroy(rmap_head);
+ return pte_list_destroy(kvm, rmap_head);
}
static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
need_flush = 1;
if (pte_write(pte)) {
- pte_list_remove(rmap_head, sptep);
+ pte_list_remove(kvm, rmap_head, sptep);
goto restart;
} else {
new_spte = kvm_mmu_changed_pte_notifier_make_spte(
*sptep, new_pfn);
- mmu_spte_clear_track_bits(sptep);
+ mmu_spte_clear_track_bits(kvm, sptep);
mmu_spte_set(sptep, new_spte);
}
}
if (is_shadow_present_pte(pte)) {
if (is_last_spte(pte, sp->role.level)) {
drop_spte(kvm, spte);
- if (is_large_pte(pte))
- --kvm->stat.lpages;
} else {
child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
drop_parent_pte(child, spte);
trace_kvm_mmu_set_spte(level, gfn, sptep);
if (!was_rmapped) {
- if (is_large_pte(*sptep))
- ++vcpu->kvm->stat.lpages;
+ kvm_update_page_stats(vcpu->kvm, level, 1);
rmap_count = rmap_add(vcpu, sptep, gfn);
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
rmap_recycle(vcpu, sptep, gfn);
if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
pfn, PG_LEVEL_NUM)) {
- pte_list_remove(rmap_head, sptep);
+ pte_list_remove(kvm, rmap_head, sptep);
if (kvm_available_flush_tlb_with_range())
kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
bool was_leaf = was_present && is_last_spte(old_spte, level);
bool is_leaf = is_present && is_last_spte(new_spte, level);
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
- bool was_large, is_large;
WARN_ON(level > PT64_ROOT_MAX_LEVEL);
WARN_ON(level < PG_LEVEL_4K);
return;
}
- /*
- * Update large page stats if a large page is being zapped, created, or
- * is replacing an existing shadow page.
- */
- was_large = was_leaf && is_large_pte(old_spte);
- is_large = is_leaf && is_large_pte(new_spte);
- if (was_large != is_large) {
- if (was_large)
- atomic64_sub(1, (atomic64_t *)&kvm->stat.lpages);
- else
- atomic64_add(1, (atomic64_t *)&kvm->stat.lpages);
- }
+ if (is_leaf != was_leaf)
+ kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
if (was_leaf && is_dirty_spte(old_spte) &&
(!is_present || !is_dirty_spte(new_spte) || pfn_changed))
STATS_DESC_COUNTER(VM, mmu_recycled),
STATS_DESC_COUNTER(VM, mmu_cache_miss),
STATS_DESC_ICOUNTER(VM, mmu_unsync),
- STATS_DESC_ICOUNTER(VM, lpages),
+ STATS_DESC_ICOUNTER(VM, pages_4k),
+ STATS_DESC_ICOUNTER(VM, pages_2m),
+ STATS_DESC_ICOUNTER(VM, pages_1g),
STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
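
The three new descriptors assume matching fields in struct kvm_vm_stat. One layout that lets the descriptors address the counters by name while kvm_update_page_stats() indexes them by level is an anonymous union, sketched here (everything beyond the pages_4k/pages_2m/pages_1g names is an assumption of this sketch):

struct kvm_vm_stat {
	/* existing counters (mmu_recycled, mmu_unsync, ...) omitted */
	union {
		struct {
			atomic64_t pages_4k;
			atomic64_t pages_2m;
			atomic64_t pages_1g;
		};
		atomic64_t pages[3];	/* indexed by page-table level - 1 */
	};
	/* remaining counters omitted */
};

With the descriptors in place, the counters should surface through the usual per-VM stats interfaces (debugfs and the binary stats fd) under the names pages_4k, pages_2m and pages_1g.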