--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1138,6 +1138,7 @@ struct kvm_vm_stat {
ulong lpages;
ulong nx_lpage_splits;
ulong max_mmu_page_hash_collisions;
+ ulong max_mmu_rmap_size;
};
struct kvm_vcpu_stat {
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2602,6 +2602,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (is_shadow_present_pte(*sptep)) {
if (!was_rmapped) {
rmap_count = rmap_add(vcpu, sptep, gfn);
+ if (rmap_count > vcpu->kvm->stat.max_mmu_rmap_size)
+ vcpu->kvm->stat.max_mmu_rmap_size = rmap_count;
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
rmap_recycle(vcpu, sptep, gfn);
}
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,6 +257,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VM_STAT("largepages", lpages, .mode = 0444),
VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
+ VM_STAT("max_mmu_rmap_size", max_mmu_rmap_size),
{ NULL }
};
Add a new statistic max_mmu_rmap_size, which stores the maximum size of
rmap for the vm.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/mmu/mmu.c          | 2 ++
 arch/x86/kvm/x86.c              | 1 +
 3 files changed, 4 insertions(+)