@@ -847,6 +847,11 @@ struct kvm_vcpu_arch {
gpa_t nested_io_gpr;
/* For nested APIv2 guests*/
struct kvmhv_nestedv2_io nestedv2_io;
+
+ /* Aggregate context switch and guest run time info (in ns) */
+ u64 l1_to_l2_cs_agg;
+ u64 l2_to_l1_cs_agg;
+ u64 l2_runtime_agg;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
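[Editorial aside, not part of the patch] The three aggregates added above live for the lifetime of the vCPU and are only ever summed into. A minimal sketch of how they could later be surfaced per-vCPU through debugfs is shown below; the helper name and file names are hypothetical and this patch itself adds no debugfs interface.

/*
 * Illustrative only: expose the aggregate counters via the vCPU's
 * debugfs directory (requires <linux/debugfs.h>). The helper name and
 * file names are hypothetical, not part of this patch.
 */
static void kvmhv_nestedv2_create_debugfs(struct kvm_vcpu *vcpu,
					  struct dentry *debugfs_dentry)
{
	debugfs_create_u64("l1_to_l2_cs_time_ns", 0444, debugfs_dentry,
			   &vcpu->arch.l1_to_l2_cs_agg);
	debugfs_create_u64("l2_to_l1_cs_time_ns", 0444, debugfs_dentry,
			   &vcpu->arch.l2_to_l1_cs_agg);
	debugfs_create_u64("l2_runtime_ns", 0444, debugfs_dentry,
			   &vcpu->arch.l2_runtime_agg);
}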
@@ -62,7 +62,8 @@ struct lppaca {
u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */
u8 fpregs_in_use;
u8 pmcregs_in_use;
- u8 reserved8[28];
+ u8 l2_accumul_cntrs_enable; /* Enable usage of counters for KVM guest */
+ u8 reserved8[27];
__be64 wait_state_cycles; /* Wait cycles for this proc */
u8 reserved9[28];
__be16 slb_count; /* # of SLBs to maintain */
@@ -92,9 +93,13 @@ struct lppaca {
/* cacheline 4-5 */
__be32 page_ins; /* CMO Hint - # page ins by OS */
- u8 reserved12[148];
+ u8 reserved12[28];
+ volatile __be64 l1_to_l2_cs_tb;
+ volatile __be64 l2_to_l1_cs_tb;
+ volatile __be64 l2_runtime_tb;
+ u8 reserved13[96];
volatile __be64 dtl_idx; /* Dispatch Trace Log head index */
- u8 reserved13[96];
+ u8 reserved14[96];
} ____cacheline_aligned;
#define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr)
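[Editorial aside, not part of the patch] The 24 bytes for the three new timebase counters are carved out of the former reserved12[148] block: 28 + 3*8 + 96 = 148, so dtl_idx keeps its architected offset in the VPA. A compile-time check along these lines could document that invariant:

/*
 * Illustrative layout check (not in the patch): the resized reserved12,
 * the three timebase counters and the new reserved13 must add back up
 * to the 148 bytes they replace, keeping dtl_idx at the same offset.
 */
static_assert(offsetof(struct lppaca, dtl_idx) -
	      offsetof(struct lppaca, reserved12) == 148);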
@@ -4108,6 +4108,37 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
}
}
+static inline int kvmhv_get_l2_accumul(void)
+{
+ return get_lppaca()->l2_accumul_cntrs_enable;
+}
+
+static inline void kvmhv_set_l2_accumul(int val)
+{
+ get_lppaca()->l2_accumul_cntrs_enable = val;
+}
+
+static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
+{
+ struct lppaca *lp = get_lppaca();
+ u64 l1_to_l2_ns, l2_to_l1_ns, l2_runtime_ns;
+
+ l1_to_l2_ns = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb));
+ l2_to_l1_ns = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb));
+ l2_runtime_ns = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb));
+ trace_kvmppc_vcpu_exit_cs_time(vcpu, l1_to_l2_ns, l2_to_l1_ns,
+ l2_runtime_ns);
+ lp->l1_to_l2_cs_tb = 0;
+ lp->l2_to_l1_cs_tb = 0;
+ lp->l2_runtime_tb = 0;
+ kvmhv_set_l2_accumul(0);
+
+	/* Maintain an aggregate of context switch times */

+ vcpu->arch.l1_to_l2_cs_agg += l1_to_l2_ns;
+ vcpu->arch.l2_to_l1_cs_agg += l2_to_l1_ns;
+ vcpu->arch.l2_runtime_agg += l2_runtime_ns;
+}
+
static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr, u64 *tb)
{
@@ -4130,6 +4161,11 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
kvmppc_gse_put_u64(io->vcpu_run_input, KVMPPC_GSID_LPCR, lpcr);
accumulate_time(vcpu, &vcpu->arch.in_guest);
+
+	/* Enable the guest-host context switch time tracking */
+ if (unlikely(trace_kvmppc_vcpu_exit_cs_time_enabled()))
+ kvmhv_set_l2_accumul(1);
+
rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
&trap, &i);
@@ -4156,6 +4192,10 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
timer_rearm_host_dec(*tb);
+	/* Record the context switch and guest run time data */
+ if (kvmhv_get_l2_accumul())
+ do_trace_nested_cs_time(vcpu);
+
return trap;
}
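[Editorial aside, not part of the patch] The counters themselves are filled in by the L0 hypervisor while l2_accumul_cntrs_enable is set in the L1 vCPU's VPA; L1 only converts them to ns, traces them and clears them after plpar_guest_run_vcpu() returns. Conceptually, the accounting on each L2 dispatch looks roughly like the sketch below; it is purely illustrative, the hypervisor-side implementation is not part of this patch and the function and its parameters are hypothetical.

/*
 * Conceptual sketch of the hypervisor-side accounting; hypothetical
 * function. Each counter accumulates a timebase delta that L1 later
 * converts with tb_to_ns() in do_trace_nested_cs_time().
 */
static void l0_account_l2_dispatch(struct lppaca *l1_vpa, u64 hcall_tb,
				   u64 l2_entry_tb, u64 l2_exit_tb,
				   u64 return_tb)
{
	if (!l1_vpa->l2_accumul_cntrs_enable)
		return;

	/* L1 -> L2 switch: from the run-vcpu hcall until L2 starts running */
	l1_vpa->l1_to_l2_cs_tb =
		cpu_to_be64(be64_to_cpu(l1_vpa->l1_to_l2_cs_tb) +
			    (l2_entry_tb - hcall_tb));
	/* Time the L2 vCPU actually ran */
	l1_vpa->l2_runtime_tb =
		cpu_to_be64(be64_to_cpu(l1_vpa->l2_runtime_tb) +
			    (l2_exit_tb - l2_entry_tb));
	/* L2 -> L1 switch: from the L2 exit until control returns to L1 */
	l1_vpa->l2_to_l1_cs_tb =
		cpu_to_be64(be64_to_cpu(l1_vpa->l2_to_l1_cs_tb) +
			    (return_tb - l2_exit_tb));
}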
@@ -491,6 +491,31 @@ TRACE_EVENT(kvmppc_run_vcpu_enter,
TP_printk("VCPU %d: tgid=%d", __entry->vcpu_id, __entry->tgid)
);
+TRACE_EVENT(kvmppc_vcpu_exit_cs_time,
+ TP_PROTO(struct kvm_vcpu *vcpu, u64 l1_to_l2_cs, u64 l2_to_l1_cs,
+ u64 l2_runtime),
+
+ TP_ARGS(vcpu, l1_to_l2_cs, l2_to_l1_cs, l2_runtime),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(__u64, l1_to_l2_cs_ns)
+ __field(__u64, l2_to_l1_cs_ns)
+ __field(__u64, l2_runtime_ns)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->l1_to_l2_cs_ns = l1_to_l2_cs;
+ __entry->l2_to_l1_cs_ns = l2_to_l1_cs;
+ __entry->l2_runtime_ns = l2_runtime;
+ ),
+
+ TP_printk("VCPU %d: l1_to_l2_cs_time=%llu-ns l2_to_l1_cs_time=%llu-ns l2_runtime=%llu-ns",
+ __entry->vcpu_id, __entry->l1_to_l2_cs_ns,
+ __entry->l2_to_l1_cs_ns, __entry->l2_runtime_ns)
+);
+
TRACE_EVENT(kvmppc_run_vcpu_exit,
TP_PROTO(struct kvm_vcpu *vcpu),