@@ -945,6 +945,8 @@ struct kvm_vcpu_arch {
#define HOST_FEAT_HAS_TRF __kvm_single_flag(feats, BIT(2))
/* PMBLIMITR_EL1_E is set (SPE profiling buffer enabled) */
#define HOST_STATE_SPE_EN __kvm_single_flag(state, BIT(0))
+/* TRBLIMITR_EL1_E is set (TRBE trace buffer enabled) */
+#define HOST_STATE_TRBE_EN __kvm_single_flag(state, BIT(1))
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
@@ -1387,6 +1389,7 @@ void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
void kvm_set_pmblimitr(u64 pmblimitr);
+void kvm_set_trblimitr(u64 trblimitr);
#else
static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u64 clr) {}
@@ -1395,6 +1398,7 @@ static inline bool kvm_set_pmuserenr(u64 val)
return false;
}
static inline void kvm_set_pmblimitr(u64 pmblimitr) {}
+static inline void kvm_set_trblimitr(u64 trblimitr) {}
#endif
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
@@ -364,3 +364,19 @@ void kvm_set_pmblimitr(u64 pmblimitr)
host_data_clear_flag(HOST_STATE_SPE_EN);
}
EXPORT_SYMBOL_GPL(kvm_set_pmblimitr);
+
+void kvm_set_trblimitr(u64 trblimitr)
+{
+	/* The flag is only consumed by the nVHE hypervisor on guest switch */
+	if (has_vhe())
+		return;
+
+	if (kvm_arm_skip_trace_state())
+		return;
+
+	/* Record whether TRBLIMITR_EL1.E (trace buffer enable) is set */
+	if (trblimitr & TRBLIMITR_EL1_E)
+		host_data_set_flag(HOST_STATE_TRBE_EN);
+	else
+		host_data_clear_flag(HOST_STATE_TRBE_EN);
+}
+EXPORT_SYMBOL_GPL(kvm_set_trblimitr);
@@ -18,6 +18,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <linux/vmalloc.h>
+#include <linux/kvm_host.h>
#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"
@@ -213,6 +214,12 @@ static inline void trbe_drain_buffer(void)
dsb(nsh);
}
+static void trbe_write_trblimitr(u64 val)
+{
+	write_sysreg_s(val, SYS_TRBLIMITR_EL1);
+	kvm_set_trblimitr(val); /* keep KVM's view of the TRBE enable state in sync */
+}
+
static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
{
/*
@@ -220,7 +227,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
* might be required for fetching the buffer limits.
*/
trblimitr |= TRBLIMITR_EL1_E;
- write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ trbe_write_trblimitr(trblimitr);
/* Synchronize the TRBE enable event */
isb();
@@ -238,7 +245,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
* might be required for fetching the buffer limits.
*/
trblimitr &= ~TRBLIMITR_EL1_E;
- write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ trbe_write_trblimitr(trblimitr);
if (trbe_needs_drain_after_disable(cpudata))
trbe_drain_buffer();
@@ -253,8 +260,10 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
+ preempt_disable();
trbe_drain_and_disable_local(cpudata);
- write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ trbe_write_trblimitr(0);
+ preempt_enable();
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
write_sysreg_s(0, SYS_TRBSR_EL1);
Currently in nVHE, KVM has to check if TRBE is enabled on every guest switch even if it was never used. Because it's a debug feature and is more likely to be unused than used, give KVM the TRBE buffer status to allow a much simpler and faster do-nothing path in the hyp. This is always called with preemption disabled except for probe/hotplug which is wrapped with preempt_disable(). Signed-off-by: James Clark <james.clark@linaro.org> --- arch/arm64/include/asm/kvm_host.h | 4 ++++ arch/arm64/kvm/debug.c | 16 ++++++++++++++++ drivers/hwtracing/coresight/coresight-trbe.c | 15 ++++++++++++--- 3 files changed, 32 insertions(+), 3 deletions(-)