@@ -522,6 +522,8 @@ struct kvm_pmc {
	 */
	u64 emulated_counter;
	u64 eventsel;
+	u64 msr_counter;
+	u64 msr_eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
	/*
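
The two new fields let each kvm_pmc carry the addresses of the hardware MSRs that back it. A minimal sketch of the kind of vendor-neutral helper this enables (hypothetical, not part of the patch; it assumes the refresh callbacks below have filled in the fields):

/* Hypothetical helper, not in the patch: program the hardware behind a
 * PMC without any vendor-specific MSR arithmetic. */
static void pmc_write_hw(struct kvm_pmc *pmc, u64 eventsel, u64 count)
{
	wrmsrl(pmc->msr_eventsel, eventsel);
	wrmsrl(pmc->msr_counter, count);
}
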
@@ -177,6 +177,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 {
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	union cpuid_0x80000022_ebx ebx;
+	int i;

	pmu->version = 1;
	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFMON_V2)) {
@@ -210,6 +211,18 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
+
+	if (pmu->version > 1 || guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
+		for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
+			pmu->gp_counters[i].msr_eventsel = MSR_F15H_PERF_CTL0 + 2 * i;
+			pmu->gp_counters[i].msr_counter = MSR_F15H_PERF_CTR0 + 2 * i;
+		}
+	} else {
+		for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
+			pmu->gp_counters[i].msr_eventsel = MSR_K7_EVNTSEL0 + i;
+			pmu->gp_counters[i].msr_counter = MSR_K7_PERFCTR0 + i;
+		}
+	}
 }

 static void amd_pmu_init(struct kvm_vcpu *vcpu)
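
On AMD, the core-extension counters interleave control and counter MSRs, hence the 2 * i stride, while the legacy K7 MSRs form two flat ranges. A worked map for the first two GP counters (MSR values from the architectural definitions; illustrative only):

/*
 * PERFCTR_CORE / PerfMonV2 (interleaved, stride 2):
 *   gp_counters[0]: msr_eventsel = 0xc0010200, msr_counter = 0xc0010201
 *   gp_counters[1]: msr_eventsel = 0xc0010202, msr_counter = 0xc0010203
 *
 * Legacy K7 (two flat ranges):
 *   gp_counters[0]: msr_eventsel = 0xc0010000, msr_counter = 0xc0010004
 *   gp_counters[1]: msr_eventsel = 0xc0010001, msr_counter = 0xc0010005
 */
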
@@ -562,6 +562,19 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
				~((1ull << pmu->nr_arch_gp_counters) - 1);
		}
	}
+
+	for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
+		pmu->gp_counters[i].msr_eventsel = MSR_P6_EVNTSEL0 + i;
+		if (fw_writes_is_enabled(vcpu))
+			pmu->gp_counters[i].msr_counter = MSR_IA32_PMC0 + i;
+		else
+			pmu->gp_counters[i].msr_counter = MSR_IA32_PERFCTR0 + i;
+	}
+
+	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+		pmu->fixed_counters[i].msr_eventsel = MSR_CORE_PERF_FIXED_CTR_CTRL;
+		pmu->fixed_counters[i].msr_counter = MSR_CORE_PERF_FIXED_CTR0 + i;
+	}
 }

 static void intel_pmu_init(struct kvm_vcpu *vcpu)
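
On Intel both ranges are flat. The counter MSR is recorded as the full-width alias MSR_IA32_PMC0 when the guest can use full-width writes, and as the legacy MSR_IA32_PERFCTR0 otherwise; both names address the same hardware counter. Every fixed counter shares the single MSR_CORE_PERF_FIXED_CTR_CTRL as its "eventsel", since one control MSR gates all fixed counters. With the fields populated, a common read path needs no vendor callback; a hypothetical sketch, not part of the patch:

static u64 pmc_read_hw(struct kvm_pmc *pmc)
{
	u64 val;

	/* Works for Intel GP/fixed counters and both AMD layouts alike. */
	rdmsrl(pmc->msr_counter, val);
	return val;
}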