@@ -77,6 +77,16 @@ static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
 	return container_of(vcpu_arch, struct kvm_vcpu, arch);
 }
 
+static u32 counter_index_to_reg(u64 idx)
+{
+	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
+}
+
+static u32 counter_index_to_evtreg(u64 idx)
+{
+	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
+}
+
 /**
  * kvm_pmu_get_counter_value - get PMU counter value
  * @vcpu: The vcpu pointer
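For reference, the mapping these helpers centralize: event counters 0..30 keep their value in PMEVCNTR<n>_EL0 and their event/filter configuration in PMEVTYPER<n>_EL0, while index 31 (ARMV8_PMU_CYCLE_IDX) is the cycle counter, backed by PMCCNTR_EL0 and PMCCFILTR_EL0. The "+ idx" arithmetic relies on those slots being consecutive in the vcpu sysreg file. A minimal userspace sketch of the value-register half, with made-up register indices standing in for KVM's sysreg enum:

#include <assert.h>

/*
 * Made-up sysreg indices, for illustration only; KVM's real ones come
 * from the vcpu sysreg enum, where the PMEVCNTRn_EL0 (and
 * PMEVTYPERn_EL0) slots are laid out consecutively.
 */
enum { PMCCNTR_EL0 = 100, PMEVCNTR0_EL0 = 200 };
#define ARMV8_PMU_CYCLE_IDX	31	/* counter index of the cycle counter */

static unsigned int counter_index_to_reg(unsigned long idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

int main(void)
{
	assert(counter_index_to_reg(0) == PMEVCNTR0_EL0);	/* first event counter */
	assert(counter_index_to_reg(30) == PMEVCNTR0_EL0 + 30);	/* last event counter */
	assert(counter_index_to_reg(ARMV8_PMU_CYCLE_IDX) == PMCCNTR_EL0);
	return 0;
}
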
@@ -91,8 +101,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return 0;
 
-	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
+	reg = counter_index_to_reg(select_idx);
 	counter = __vcpu_sys_reg(vcpu, reg);
 
 	/*
@@ -122,8 +131,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+	reg = counter_index_to_reg(select_idx);
 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
 
 	/* Recreate the perf event to reflect the updated sample_period */
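The signed-delta += above, rather than a plain assignment, accounts for the counter value being split in two: the stored sysreg plus whatever the backing perf event has counted since it was programmed, which is what kvm_pmu_get_counter_value() returns. Adjusting only the stored half by the difference makes the sum read back as the value written. A toy model of the idiom, with hypothetical names and unsigned wraparound doing the work:

#include <assert.h>
#include <stdint.h>

/* Hypothetical two-half model of a guest counter. */
static uint64_t sysreg_val;	/* what __vcpu_sys_reg() would hold */
static uint64_t perf_count;	/* what the backing perf event has counted */

static uint64_t get_counter_value(void)
{
	return sysreg_val + perf_count;	/* the guest-visible value */
}

static void set_counter_value(uint64_t val)
{
	/* Adjust only the stored half by the signed difference. */
	sysreg_val += (int64_t)val - get_counter_value();
}

int main(void)
{
	sysreg_val = 1000;
	perf_count = 234;	/* pending events not yet folded back */
	set_counter_value(42);
	assert(get_counter_value() == 42);	/* reads back as written */
	return 0;
}
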
@@ -158,10 +166,7 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 	val = kvm_pmu_get_counter_value(vcpu, pmc->idx);
 
-	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		reg = PMCCNTR_EL0;
-	else
-		reg = PMEVCNTR0_EL0 + pmc->idx;
+	reg = counter_index_to_reg(pmc->idx);
 
 	__vcpu_sys_reg(vcpu, reg) = val;
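The save-back in kvm_pmu_stop_counter() is the converse operation: before the perf event is torn down (the full function releases it just after this), the pending perf count is folded into the stored sysreg so nothing is lost. The same two-half model, as a standalone sketch:

#include <assert.h>
#include <stdint.h>

static uint64_t sysreg_val;	/* stored half */
static uint64_t perf_count;	/* live perf-event half */

static void stop_counter(void)
{
	/* Snapshot the guest-visible value into the sysreg... */
	sysreg_val += perf_count;
	/* ...then drop the perf half, as releasing the event does upstream. */
	perf_count = 0;
}

int main(void)
{
	sysreg_val = 500;
	perf_count = 77;
	stop_counter();
	assert(sysreg_val == 577 && perf_count == 0);
	return 0;
}
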
@@ -406,16 +411,16 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 		u64 type, reg;
 
 		/* Filter on event type */
-		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
 		type &= kvm_pmu_event_mask(vcpu->kvm);
 		if (type != event)
 			continue;
 
 		/* Increment this counter */
-		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
 		if (!kvm_pmu_idx_is_64bit(vcpu, i))
 			reg = lower_32_bits(reg);
-		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
 
 		/* No overflow? move on */
 		if (kvm_pmu_idx_has_64bit_overflow(vcpu, i) ? reg : lower_32_bits(reg))
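The ternary in that overflow check is the subtle part: with FEAT_PMUv3p5 a counter can hold a full 64-bit value yet still be configured to overflow at 32 bits, which is why the value width (kvm_pmu_idx_is_64bit()) and the overflow width (kvm_pmu_idx_has_64bit_overflow()) are tested separately. A standalone sketch of the wrap logic, with the two per-counter properties reduced to boolean parameters of my own naming:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t lower_32_bits(uint64_t v)
{
	return v & 0xffffffffULL;
}

static bool increment_overflows(uint64_t *reg, bool is_64bit,
				bool has_64bit_overflow)
{
	*reg += 1;
	if (!is_64bit)
		*reg = lower_32_bits(*reg);
	/* Overflow is a wrap to zero at the configured overflow width. */
	return !(has_64bit_overflow ? *reg : lower_32_bits(*reg));
}

int main(void)
{
	uint64_t reg;

	/* 32-bit counter: wraps to zero and overflows at 2^32. */
	reg = 0xffffffff;
	assert(increment_overflows(&reg, false, false) && reg == 0);

	/* 64-bit value, 32-bit overflow: value keeps going, still overflows. */
	reg = 0xffffffff;
	assert(increment_overflows(&reg, true, false) && reg == 0x100000000ULL);

	/* 64-bit value, 64-bit overflow: nothing special happens at 2^32. */
	reg = 0xffffffff;
	assert(!increment_overflows(&reg, true, true) && reg == 0x100000000ULL);
	return 0;
}
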
@@ -551,8 +556,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 	struct perf_event_attr attr;
 	u64 eventsel, counter, reg, data;
 
-	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
+	reg = counter_index_to_evtreg(select_idx);
 	data = __vcpu_sys_reg(vcpu, reg);
 
 	kvm_pmu_stop_counter(vcpu, pmc);
@@ -634,8 +638,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 	mask |= kvm_pmu_event_mask(vcpu->kvm);
 
-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
+	reg = counter_index_to_evtreg(select_idx);
 	__vcpu_sys_reg(vcpu, reg) = data & mask;
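The mask built above sanitizes guest writes to the event-type register: starting from the architected writable-bits mask, the full event field is cleared and then re-added at the width kvm_pmu_event_mask() reports for the guest's PMU version. A sketch with the numbers spelled out, assuming the 0xc800ffff writable mask of this era and a baseline PMUv3 10-bit event field (both constants are stand-ins here):

#include <assert.h>
#include <stdint.h>

#define ARMV8_PMU_EVTYPE_MASK	0xc800ffff	/* writable bits: filters + event */
#define ARMV8_PMU_EVTYPE_EVENT	0x0000ffff	/* architected 16-bit event field */
#define PMU_EVENT_MASK_V3	0x000003ff	/* assumed: 10 bits for base PMUv3 */

int main(void)
{
	uint64_t data = ~0ULL;			/* guest writes all ones */
	uint64_t mask = ARMV8_PMU_EVTYPE_MASK;

	mask &= ~ARMV8_PMU_EVTYPE_EVENT;	/* drop the full event field... */
	mask |= PMU_EVENT_MASK_V3;		/* ...keep only the supported bits */

	/* Only the filter bits and a 10-bit event number survive the write. */
	assert((data & mask) == 0xc80003ff);
	return 0;
}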