diff mbox series

[v7,08/12] KVM: arm64: Don't hit sysregs to see if SPE is enabled or not

Message ID 20241112103717.589952-9-james.clark@linaro.org (mailing list archive)
State New
Headers show
Series kvm/coresight: Support exclude guest and exclude host | expand

Commit Message

James Clark Nov. 12, 2024, 10:37 a.m. UTC
Now that the driver tells us whether SPE was used or not, we can use
that. The exception is pKVM, where the host isn't trusted, so we keep
the existing feature + sysreg check there.

The unconditional zeroing of pmscr_el1 if nothing is saved can also be
dropped. Zeroing it after the restore has the same effect, but only
incurs the write if it was actually enabled.

Now in the normal nVHE case, SPE saving is gated by a single flag read
on kvm_host_data.

Signed-off-by: James Clark <james.clark@linaro.org>
---
 arch/arm64/include/asm/kvm_hyp.h   |  2 +-
 arch/arm64/kvm/hyp/nvhe/debug-sr.c | 52 ++++++++++++++++++------------
 arch/arm64/kvm/hyp/nvhe/switch.c   |  2 +-
 3 files changed, 34 insertions(+), 22 deletions(-)
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index c838309e4ec4..4039a42ca62a 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -105,7 +105,7 @@  void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
 void __debug_switch_to_host(struct kvm_vcpu *vcpu);
 
 #ifdef __KVM_NVHE_HYPERVISOR__
-void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+void __debug_save_host_buffers_nvhe(void);
 void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
 #endif
 
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 89f44a51a172..578c549af3c6 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -14,24 +14,23 @@ 
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-static void __debug_save_spe(u64 *pmscr_el1)
+static bool __debug_spe_enabled(void)
 {
-	u64 reg;
-
-	/* Clear pmscr in case of early return */
-	*pmscr_el1 = 0;
-
 	/*
-	 * At this point, we know that this CPU implements
-	 * SPE and is available to the host.
-	 * Check if the host is actually using it ?
+	 * Check if the host is actually using SPE. In pKVM, read the state
+	 * directly; otherwise just trust that the host told us it was being
+	 * used.
 	 */
-	reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
-	if (!(reg & BIT(PMBLIMITR_EL1_E_SHIFT)))
-		return;
+	if (unlikely(is_protected_kvm_enabled()))
+		return host_data_get_flag(HOST_FEAT_HAS_SPE) &&
+		       (read_sysreg_s(SYS_PMBLIMITR_EL1) & PMBLIMITR_EL1_E);
+	else
+		return host_data_get_flag(HOST_STATE_SPE_EN);
+}
 
-	/* Yes; save the control register and disable data generation */
-	*pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
+static void __debug_save_spe(void)
+{
+	/* Save the control register and disable data generation */
+	*host_data_ptr(host_debug_state.pmscr_el1) = read_sysreg_el1(SYS_PMSCR);
 	write_sysreg_el1(0, SYS_PMSCR);
 	isb();
 
@@ -39,8 +38,14 @@  static void __debug_save_spe(u64 *pmscr_el1)
 	psb_csync();
 }
 
-static void __debug_restore_spe(u64 pmscr_el1)
+static void __debug_restore_spe(void)
 {
+	u64 pmscr_el1 = *host_data_ptr(host_debug_state.pmscr_el1);
+
+	/*
+	 * PMSCR was set to 0 to disable data generation, so if it's already 0,
+	 * no restore is necessary.
+	 */
 	if (!pmscr_el1)
 		return;
 
@@ -49,6 +54,13 @@  static void __debug_restore_spe(u64 pmscr_el1)
 
 	/* Re-enable data generation */
 	write_sysreg_el1(pmscr_el1, SYS_PMSCR);
+
+	/*
+	 * Disable future restores until a non-zero value is saved again. Since
+	 * this is called unconditionally on exit, future register writes are
+	 * skipped until they are needed again.
+	 */
+	*host_data_ptr(host_debug_state.pmscr_el1) = 0;
 }
 
 static void __debug_save_trace(u64 *trfcr_el1)
@@ -79,11 +91,12 @@  static void __debug_restore_trace(u64 trfcr_el1)
 	write_sysreg_el1(trfcr_el1, SYS_TRFCR);
 }
 
-void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+void __debug_save_host_buffers_nvhe(void)
 {
 	/* Disable and flush SPE data generation */
-	if (host_data_get_flag(HOST_FEAT_HAS_SPE))
-		__debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1));
+	if (__debug_spe_enabled())
+		__debug_save_spe();
+
 	/* Disable and flush Self-Hosted Trace generation */
 	if (host_data_get_flag(HOST_FEAT_HAS_TRBE))
 		__debug_save_trace(host_data_ptr(host_debug_state.trfcr_el1));
@@ -96,8 +109,7 @@  void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
 
 void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
 {
-	if (host_data_get_flag(HOST_FEAT_HAS_SPE))
-		__debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1));
+	__debug_restore_spe();
 	if (host_data_get_flag(HOST_FEAT_HAS_TRBE))
 		__debug_restore_trace(*host_data_ptr(host_debug_state.trfcr_el1));
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index cc69106734ca..edd657797463 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -300,7 +300,7 @@  int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
 	 * before we load guest Stage1.
 	 */
-	__debug_save_host_buffers_nvhe(vcpu);
+	__debug_save_host_buffers_nvhe();
 
 	/*
 	 * We're about to restore some new MMU state. Make sure