@@ -87,6 +87,11 @@ struct kvm_vcpu_stat {
u64 csr_exit_kernel;
u64 signal_exits;
u64 exits;
+ u64 instr_illegal_exits;
+ u64 load_misaligned_exits;
+ u64 store_misaligned_exits;
+ u64 load_access_exits;
+ u64 store_access_exits;
};

struct kvm_arch_memory_slot {
@@ -34,7 +34,12 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, csr_exit_user),
STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
STATS_DESC_COUNTER(VCPU, signal_exits),
- STATS_DESC_COUNTER(VCPU, exits)
+ STATS_DESC_COUNTER(VCPU, exits),
+ STATS_DESC_COUNTER(VCPU, instr_illegal_exits),
+ STATS_DESC_COUNTER(VCPU, load_misaligned_exits),
+ STATS_DESC_COUNTER(VCPU, store_misaligned_exits),
+ STATS_DESC_COUNTER(VCPU, load_access_exits),
+ STATS_DESC_COUNTER(VCPU, store_access_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
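A note on how this hunk pairs with the struct change above: STATS_DESC_COUNTER() binds each table entry to its kvm_vcpu_stat field by token, recording offsetof(struct kvm_vcpu_stat, <name>) plus the stringified name, which is why the identifiers here must match the new fields exactly. As a rough sketch (simplified, not the literal preprocessor output; the real macro chain lives in include/linux/kvm_host.h), one of the new entries boils down to:

/* Rough expansion of STATS_DESC_COUNTER(VCPU, instr_illegal_exits).
 * The offset is what lets the generic stats code read the counter out
 * of each vcpu's kvm_vcpu_stat without arch-specific glue.
 */
{
	.desc = {
		.flags = KVM_STATS_TYPE_CUMULATIVE |
			 KVM_STATS_UNIT_NONE | KVM_STATS_BASE_POW10,
		.exponent = 0,
		.size = 1,	/* one u64 data value */
		.offset = offsetof(struct kvm_vcpu_stat,
				   instr_illegal_exits),
	},
	.name = "instr_illegal_exits",
},

The stringified name is what userspace sees through the stats interface, so these counter names are effectively frozen once merged.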
@@ -195,22 +195,27 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
switch (trap->scause) {
case EXC_INST_ILLEGAL:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN);
+ vcpu->stat.instr_illegal_exits++;
ret = vcpu_redirect(vcpu, trap);
break;
case EXC_LOAD_MISALIGNED:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_LOAD);
+ vcpu->stat.load_misaligned_exits++;
ret = vcpu_redirect(vcpu, trap);
break;
case EXC_STORE_MISALIGNED:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_STORE);
+ vcpu->stat.store_misaligned_exits++;
ret = vcpu_redirect(vcpu, trap);
break;
case EXC_LOAD_ACCESS:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_LOAD);
+ vcpu->stat.load_access_exits++;
ret = vcpu_redirect(vcpu, trap);
break;
case EXC_STORE_ACCESS:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_STORE);
+ vcpu->stat.store_access_exits++;
ret = vcpu_redirect(vcpu, trap);
break;
case EXC_INST_ACCESS:
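For completeness, a hedged userspace sketch (not part of this patch) of how the new counters come back out through KVM's binary stats interface, KVM_GET_STATS_FD. It creates a bare VM and vCPU purely to obtain a stats fd, abbreviates error handling, and assumes a kernel with this series applied; a real VMM would instead read the stats fd of a live vCPU after the guest has actually taken some of these exits.

/* Hypothetical demo, not from this series: dump all vCPU stats,
 * including the new *_exits counters, via KVM_GET_STATS_FD.
 * Error handling is abbreviated for brevity.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	int stats = ioctl(vcpu, KVM_GET_STATS_FD, NULL);
	struct kvm_stats_header hdr;
	size_t desc_sz;
	char *descs;
	uint64_t val;
	unsigned int i;

	if (stats < 0) {
		perror("KVM_GET_STATS_FD");
		return 1;
	}
	pread(stats, &hdr, sizeof(hdr), 0);

	/* Each descriptor is the fixed struct plus a name_size-byte name. */
	desc_sz = sizeof(struct kvm_stats_desc) + hdr.name_size;
	descs = malloc(hdr.num_desc * desc_sz);
	pread(stats, descs, hdr.num_desc * desc_sz, hdr.desc_offset);

	for (i = 0; i < hdr.num_desc; i++) {
		struct kvm_stats_desc *d =
			(struct kvm_stats_desc *)(descs + i * desc_sz);

		/* Counters are single u64s at data_offset + d->offset. */
		pread(stats, &val, sizeof(val), hdr.data_offset + d->offset);
		printf("%-28s %llu\n", d->name, (unsigned long long)val);
	}
	free(descs);
	return 0;
}

Every vcpu->stat.<counter>++ in the switch above lands in one of the u64 slots this loop prints, keyed by the names registered in kvm_vcpu_stats_desc[].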