
[RFC,5/5] KVM: s390: Add VSIE shadow stat counters

Message ID 20250318-vsieie-v1-5-6461fcef3412@linux.ibm.com (mailing list archive)
State New
Series KVM: s390: Add VSIE Interpretation Extension Facility (vsie_sigpif)

Commit Message

Christoph Schlameuss March 18, 2025, 6:59 p.m. UTC
Add new stat counters to the VSIE shadowing code so that the
functionality can be verified and monitored. A userspace sketch for
reading the counters is shown after the list below.

* vsie_shadow_scb shows the number of allocated SIE control block
  shadows. It should count upwards, between 0 and the maximum number of
  CPUs.
* vsie_shadow_sca shows the number of allocated system control area
  shadows. It should count upwards, between 0 and the maximum number of
  CPUs.
* vsie_shadow_sca_create shows the number of newly allocated system
  control area shadows.
* vsie_shadow_sca_reuse shows the number of reused system control area
  shadows.
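
For reference, the counters can be read from userspace through the KVM
binary stats interface (KVM_GET_STATS_FD). The snippet below is only a
minimal sketch and not part of this patch; it assumes vm_fd is a VM file
descriptor obtained elsewhere via KVM_CREATE_VM.

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Dump all vsie_shadow_* VM stats of the VM behind vm_fd. */
  static void dump_vsie_stats(int vm_fd)
  {
          struct kvm_stats_header hdr;
          struct kvm_stats_desc *desc;
          size_t desc_sz;
          unsigned int i;
          int stats_fd;
          __u64 val;

          /* requires KVM_CAP_BINARY_STATS_FD */
          stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);
          if (stats_fd < 0)
                  return;

          /* the header sits at offset 0 of the stats fd */
          pread(stats_fd, &hdr, sizeof(hdr), 0);

          /* each descriptor is followed by a name of hdr.name_size bytes */
          desc_sz = sizeof(*desc) + hdr.name_size;
          desc = malloc(desc_sz);
          if (!desc)
                  goto out;

          for (i = 0; i < hdr.num_desc; i++) {
                  pread(stats_fd, desc, desc_sz,
                        hdr.desc_offset + (off_t)i * desc_sz);
                  if (strncmp(desc->name, "vsie_shadow_", 12))
                          continue;
                  /* the counter value lives at data_offset + desc->offset */
                  pread(stats_fd, &val, sizeof(val),
                        hdr.data_offset + desc->offset);
                  printf("%s: %llu\n", desc->name, (unsigned long long)val);
          }

          free(desc);
  out:
          close(stats_fd);
  }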

Signed-off-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
---
 arch/s390/include/asm/kvm_host.h | 4 ++++
 arch/s390/kvm/kvm-s390.c         | 4 ++++
 arch/s390/kvm/vsie.c             | 7 ++++++-
 3 files changed, 14 insertions(+), 1 deletion(-)

Patch

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e44f43906844d3b629e9685637af3f66398a4a8d..909c662ac4e3e1e70a2e3e9054acee14bc20ed02 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -824,6 +824,10 @@  struct kvm_vm_stat {
 	u64 gmap_shadow_r3_entry;
 	u64 gmap_shadow_sg_entry;
 	u64 gmap_shadow_pg_entry;
+	u64 vsie_shadow_scb;
+	u64 vsie_shadow_sca;
+	u64 vsie_shadow_sca_create;
+	u64 vsie_shadow_sca_reuse;
 };
 
 struct kvm_arch_memory_slot {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 16204c638119fa3a6c36e8e24af2b0b399f8123b..aba798e7814be6011d71a1e1be894e2c0a6b2bb2 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -76,6 +76,10 @@  const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
 	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
 	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
+	STATS_DESC_COUNTER(VM, vsie_shadow_scb),
+	STATS_DESC_COUNTER(VM, vsie_shadow_sca),
+	STATS_DESC_COUNTER(VM, vsie_shadow_sca_create),
+	STATS_DESC_COUNTER(VM, vsie_shadow_sca_reuse),
 };
 
 const struct kvm_stats_header kvm_vm_stats_header = {
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 3ddebebf8e9e90be3d5e27b6dc91d91214c3ea34..7b599b6eb2ceb4141b8f1489804aef5dcd429ea0 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -678,8 +678,10 @@  static struct ssca_vsie *get_existing_ssca(struct kvm *kvm, u64 sca_o_hva)
 {
 	struct ssca_vsie *ssca = radix_tree_lookup(&kvm->arch.vsie.osca_to_ssca, sca_o_hva);
 
-	if (ssca)
+	if (ssca) {
 		WARN_ON_ONCE(atomic_inc_return(&ssca->ref_count) < 1);
+		kvm->stat.vsie_shadow_sca_reuse++;
+	}
 	return ssca;
 }
 
@@ -755,6 +757,7 @@  static struct ssca_vsie *get_ssca(struct kvm *kvm, struct vsie_page *vsie_page)
 
 		kvm->arch.vsie.sscas[kvm->arch.vsie.ssca_count] = ssca;
 		kvm->arch.vsie.ssca_count++;
+		kvm->stat.vsie_shadow_sca++;
 	} else {
 		/* reuse previously created ssca for different osca */
 		ssca = get_free_existing_ssca(kvm);
@@ -771,6 +774,7 @@  static struct ssca_vsie *get_ssca(struct kvm *kvm, struct vsie_page *vsie_page)
 
 	/* virt_to_phys(sca_o_hva) == ssca->osca */
 	radix_tree_insert(&kvm->arch.vsie.osca_to_ssca, sca_o_hva, ssca);
+	kvm->stat.vsie_shadow_sca_create++;
 	WRITE_ONCE(ssca->ssca.osca, sca_o_hpa);
 
 out:
@@ -1672,6 +1676,7 @@  static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 		__set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
 		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = vsie_page;
 		kvm->arch.vsie.page_count++;
+		kvm->stat.vsie_shadow_scb++;
 	} else {
 		/* reuse an existing entry that belongs to nobody */
 		while (true) {