@@ -128,6 +128,9 @@ struct kvm_vcpu_stat {
u32 ld_slow;
u32 st_slow;
#endif
+ u32 pthru_all;
+ u32 pthru_host;
+ u32 pthru_bad_aff;
};
enum kvm_exit_types {
@@ -65,6 +65,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "ld_slow", VCPU_STAT(ld_slow) },
{ "st", VCPU_STAT(st) },
{ "st_slow", VCPU_STAT(st_slow) },
+ { "pthru_all", VCPU_STAT(pthru_all) },
+ { "pthru_host", VCPU_STAT(pthru_host) },
+ { "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
{ NULL }
};
@@ -696,6 +696,7 @@ static struct kvmppc_irq_map *get_irqmap_gsi(
unsigned long irq_map_err;
/*
+ * Count passthrough IRQs and track their affinity.
* Change affinity to CPU running the target VCPU.
*/
static void ics_set_affinity_passthru(struct ics_irq_state *state,
@@ -708,17 +709,23 @@ static void ics_set_affinity_passthru(struct ics_irq_state *state,
s16 intr_cpu;
u32 pcpu;
+ vcpu->stat.pthru_all++;
+
intr_cpu = state->intr_cpu;
if (intr_cpu == -1)
return;
+ vcpu->stat.pthru_host++;
+
state->intr_cpu = -1;
pcpu = cpu_first_thread_sibling(raw_smp_processor_id());
if (intr_cpu == pcpu)
return;
+ vcpu->stat.pthru_bad_aff++;
+
pimap = kvmppc_get_passthru_irqmap(vcpu);
if (likely(pimap)) {
irq_map = get_irqmap_gsi(pimap, irq);
Add VCPU stat counters to track affinity for passthrough interrupts. pthru_all: Counts all passthrough interrupts whose IRQ mappings have been cached in the kvmppc_passthru_irq_map cache. pthru_host: Counts all cached passthrough interrupts that were injected from the host through kvm_set_irq. pthru_bad_aff: Counts how many cached passthrough interrupts have bad affinity (receiving CPU is not running VCPU that is the target of the virtual interrupt in the guest). Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com> --- arch/powerpc/include/asm/kvm_host.h | 3 +++ arch/powerpc/kvm/book3s.c | 3 +++ arch/powerpc/kvm/book3s_hv_rm_xics.c | 7 +++++++ 3 files changed, 13 insertions(+)