diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -213,7 +213,7 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
return allow_event;
}

-void reprogram_gp_counter(struct kvm_pmc *pmc)
+static void reprogram_gp_counter(struct kvm_pmc *pmc)
{
u64 config;
u32 type = PERF_TYPE_RAW;
@@ -256,9 +256,8 @@ void reprogram_gp_counter(struct kvm_pmc *pmc)
(eventsel & HSW_IN_TX),
(eventsel & HSW_IN_TX_CHECKPOINTED));
}
-EXPORT_SYMBOL_GPL(reprogram_gp_counter);

-void reprogram_fixed_counter(struct kvm_pmc *pmc)
+static void reprogram_fixed_counter(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
@@ -286,7 +285,6 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc)
!(en_field & 0x1), /* exclude kernel */
pmi, false, false);
}
-EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

void reprogram_counter(struct kvm_pmc *pmc)
{
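[Note: for context, reprogram_counter() is the uniform interface the callers below are switched to. A minimal sketch of its shape at this point in the series, assuming the pmc_is_gp() helper from pmu.h (paraphrased, not part of this diff):

	/* Routes to the matching file-local helper, so callers no
	 * longer need to know whether a counter is gp or fixed.
	 */
	void reprogram_counter(struct kvm_pmc *pmc)
	{
		if (pmc_is_gp(pmc))		/* general-purpose counter */
			reprogram_gp_counter(pmc);
		else				/* fixed-function counter */
			reprogram_fixed_counter(pmc);
	}
	EXPORT_SYMBOL_GPL(reprogram_counter);

It stays exported while the two type-specific helpers become static.]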
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -140,8 +140,6 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
return sample_period;
}

-void reprogram_gp_counter(struct kvm_pmc *pmc);
-void reprogram_fixed_counter(struct kvm_pmc *pmc);
void reprogram_counter(struct kvm_pmc *pmc);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -266,7 +266,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 0;
if (!(data & pmu->reserved_bits)) {
pmc->eventsel = data;
- reprogram_gp_counter(pmc);
+ reprogram_counter(pmc);
return 0;
}
}
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -52,7 +52,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
- reprogram_fixed_counter(pmc);
+ reprogram_counter(pmc);
}
}

@@ -449,7 +449,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 0;
if (!(data & pmu->reserved_bits)) {
pmc->eventsel = data;
- reprogram_gp_counter(pmc);
+ reprogram_counter(pmc);
return 0;
}
} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
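[Note: with the pmu.h declarations and the EXPORT_SYMBOL_GPL()s removed, reprogram_gp_counter() and reprogram_fixed_counter() are no longer reachable outside pmu.c; the vendor modules (svm/pmu.c and vmx/pmu_intel.c) can only reprogram counters through reprogram_counter(), so any leftover direct caller would fail to build, which makes the conversion easy to verify.]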