@@ -524,6 +524,7 @@ struct kvm_pmc {
 	 */
 	u64 emulated_counter;
 	u64 eventsel;
+	u64 eventsel_hw;
 	struct perf_event *perf_event;
 	struct kvm_vcpu *vcpu;
 	/*
@@ -552,6 +553,7 @@ struct kvm_pmu {
 	unsigned nr_arch_fixed_counters;
 	unsigned available_event_types;
 	u64 fixed_ctr_ctrl;
+	u64 fixed_ctr_ctrl_hw;
 	u64 fixed_ctr_ctrl_rsvd;
 	/*
 	 * kvm_pmu_sync_global_ctrl_from_vmcs() must be called to update
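Together these two hunks (both structures live in arch/x86/include/asm/kvm_host.h) give KVM a shadow of what is actually programmed into hardware: each general-purpose counter's eventsel, the value the guest sees on RDMSR, gains an eventsel_hw twin, and the PMU-wide fixed_ctr_ctrl gains fixed_ctr_ctrl_hw. The guest-visible copy must stay whatever the guest wrote, while the _hw copy is the value KVM will load into the real MSR, leaving room for the two to diverge when KVM needs to adjust the hardware programming without the guest noticing.

As a rough sketch of the intended consumer, a context-load path would program hardware from the _hw shadows rather than from the guest-visible values. The helper name and the use of Intel's architectural MSR layout below are assumptions for illustration, not code from this patch:

static void pmu_load_hw_state(struct kvm_pmu *pmu)
{
	int i;

	/* Program what KVM decided on, not the guest's raw writes. */
	for (i = 0; i < pmu->nr_arch_gp_counters; i++)
		wrmsrl(MSR_P6_EVNTSEL0 + i, pmu->gp_counters[i].eventsel_hw);

	wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, pmu->fixed_ctr_ctrl_hw);
}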
@@ -794,11 +794,14 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 		pmc->counter = 0;
 		pmc->emulated_counter = 0;
 
-		if (pmc_is_gp(pmc))
+		if (pmc_is_gp(pmc)) {
 			pmc->eventsel = 0;
+			pmc->eventsel_hw = 0;
+		}
 	}
 
-	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
+	pmu->fixed_ctr_ctrl = pmu->fixed_ctr_ctrl_hw = 0;
+	pmu->global_ctrl = pmu->global_status = 0;
 
 	kvm_pmu_call(reset)(vcpu);
 }
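kvm_pmu_reset() in arch/x86/kvm/pmu.c now clears the shadow fields alongside the guest-visible ones, so no stale hardware programming survives a vCPU RESET/INIT; the old three-way assignment chain is split only so that fixed_ctr_ctrl_hw can be zeroed together with its twin. The invariant the rest of the patch maintains is that every write to a guest-visible field is mirrored into the matching _hw field. The patch open-codes the pairs at each site, but a hypothetical helper shows the shape of it:

static inline void pmc_set_eventsel(struct kvm_pmc *pmc, u64 data)
{
	pmc->eventsel = data;		/* what the guest reads back */
	pmc->eventsel_hw = data;	/* what will reach the real MSR */
}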
@@ -165,6 +165,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		data &= ~pmu->reserved_bits;
 		if (data != pmc->eventsel) {
 			pmc->eventsel = data;
+			pmc->eventsel_hw = data;
 			kvm_pmu_request_counter_reprogram(pmc);
 		}
 		return 0;
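The AMD WRMSR path (amd_pmu_set_msr() in arch/x86/kvm/svm/pmu.c) is the first such mirror site: after the reserved bits are masked off, a guest write to a PERF_CTL register lands in both fields, so guest-visible and hardware state start out identical. For example, a guest write of 0x4300c0 (event 0xC0, retired instructions, with USR, OS and EN set) would be stored verbatim in both eventsel and eventsel_hw, assuming none of those bits are reserved for the vCPU model.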
@@ -41,6 +41,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 	int i;
 
 	pmu->fixed_ctr_ctrl = data;
+	pmu->fixed_ctr_ctrl_hw = data;
 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
 		u8 new_ctrl = fixed_ctrl_field(data, i);
 		u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);
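reprogram_fixed_counters() in arch/x86/kvm/vmx/pmu_intel.c gets the same treatment for the fixed-counter control MSR. Recall that fixed_ctr_ctrl packs one 4-bit control field per fixed counter, which fixed_ctrl_field() extracts: for example, data = 0xb0 leaves fixed counter 0 disabled and enables fixed counter 1 for both ring 0 and ring 3 with PMI on overflow, since fixed_ctrl_field(0xb0, 1) == 0xb.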
@@ -403,6 +404,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 			if (data != pmc->eventsel) {
 				pmc->eventsel = data;
+				pmc->eventsel_hw = data;
 				kvm_pmu_request_counter_reprogram(pmc);
 			}
 			break;
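The Intel general-purpose path (intel_pmu_set_msr() in arch/x86/kvm/vmx/pmu_intel.c) mirrors the write exactly as the AMD one does. With every guest-initiated write funneled into both fields, KVM is free to edit the hardware copy behind the guest's back. A sketch of the kind of divergence the split enables; the helper name is made up for illustration:

static void pmc_disable_in_hw(struct kvm_pmc *pmc)
{
	/*
	 * The guest still reads back its own programming via
	 * pmc->eventsel; only the value destined for hardware
	 * loses its enable bit.
	 */
	pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
}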