@@ -133,6 +133,38 @@ static void intel_pmu_update_host_fixed_ctrl(u64 new_ctrl, u8 host_idx)
wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, host_ctrl);
}
+static void intel_pmu_enable_host_counter(struct kvm_pmc *pmc)
+{
+ u8 host_idx;
+
+ if (!intel_pmc_is_assigned(pmc))
+ return;
+
+ host_idx = pmc->perf_event->hw.idx;
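+ /*
+  * Dispatch on the host-assigned hw.idx: with cross-mapping, the host
+  * counter type may differ from the guest counter type in pmc->idx.
+  */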
+ if (host_idx >= INTEL_PMC_IDX_FIXED)
+ intel_pmu_enable_host_fixed_counter(pmc);
+ else
+ intel_pmu_enable_host_gp_counter(pmc);
+}
+
+static void intel_pmu_disable_host_counter(struct kvm_pmc *pmc)
+{
+ u8 host_idx;
+
+ if (!intel_pmc_is_assigned(pmc))
+ return;
+
+ host_idx = pmc->perf_event->hw.idx;
+ if (host_idx >= INTEL_PMC_IDX_FIXED)
+ intel_pmu_disable_host_fixed_counter(pmc);
+ else
+ intel_pmu_disable_host_gp_counter(pmc);
+}
+
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
int i;
@@ -262,6 +294,63 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
return ret;
}
+static void intel_pmu_save_guest_pmc(struct kvm_pmu *pmu, u32 idx)
+{
+ struct kvm_pmc *pmc = intel_pmc_idx_to_pmc(pmu, idx);
+
+ if (!intel_pmc_is_assigned(pmc))
+ return;
+
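+ /* Snapshot the guest count, then clear the hardware counter. */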
+ rdmsrl(pmc->perf_event->hw.event_base, pmc->counter);
+ wrmsrl(pmc->perf_event->hw.event_base, 0);
+}
+
+static void intel_pmu_restore_guest_pmc(struct kvm_pmu *pmu, u32 idx)
+{
+ struct kvm_pmc *pmc = intel_pmc_idx_to_pmc(pmu, idx);
+ u8 ctrl;
+
+ if (!intel_pmc_is_assigned(pmc))
+ return;
+
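+ /*
+  * The enable decision follows the guest counter type (pmc->idx);
+  * the host counter that gets toggled may be of the other type.
+  */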
+ if (pmc->idx >= INTEL_PMC_IDX_FIXED) {
+ ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
+ pmc->idx - INTEL_PMC_IDX_FIXED);
+ if (ctrl)
+ intel_pmu_enable_host_counter(pmc);
+ else
+ intel_pmu_disable_host_counter(pmc);
+ } else {
+ if (!(pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE))
+ intel_pmu_disable_host_counter(pmc);
+ else
+ intel_pmu_enable_host_counter(pmc);
+ }
+
+ wrmsrl(pmc->perf_event->hw.event_base, pmc->counter);
+}
+
+static void intel_pmc_stop_counter(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+ if (!pmc->perf_event)
+ return;
+
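+ /* Quiesce the host counter before saving and releasing the event. */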
+ intel_pmu_disable_host_counter(pmc);
+ intel_pmu_save_guest_pmc(pmu, pmc->idx);
+ pmc_read_counter(pmc);
+ perf_event_release_kernel(pmc->perf_event);
+ pmc->perf_event = NULL;
+ pmc->hw_life_count = 0;
+}
+
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -424,17 +513,20 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc;
int i;
for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
- struct kvm_pmc *pmc = &pmu->gp_counters[i];
-
- pmc_stop_counter(pmc);
+ pmc = &pmu->gp_counters[i];
+ intel_pmc_stop_counter(pmc);
pmc->counter = pmc->eventsel = 0;
}
- for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
- pmc_stop_counter(&pmu->fixed_counters[i]);
+ for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
+ pmc = &pmu->fixed_counters[i];
+ intel_pmc_stop_counter(pmc);
+ pmc->counter = 0;
+ }
pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
pmu->global_ovf_ctrl = 0;
We may not assume that a guest fixed vPMC is backed by a host fixed
counter, or vice versa. This case, where the host hw->idx has a
different counter type than the guest pmc->idx, is called cross-mapping,
and the save/restore and enable/disable paths need to preserve the mask
select and enable ctrl semantics for the actual host counter type.

Signed-off-by: Like Xu <like.xu@linux.intel.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 102 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 97 insertions(+), 5 deletions(-)
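
As a side note for review, below is a minimal, self-contained sketch of
the cross-mapping rule in plain userspace C. Names such as model_pmc and
sync_host_counter are illustrative stand-ins, not kernel APIs: the
enable/disable decision follows the guest counter type, while the
programming target follows whatever host counter type perf assigned.

#include <stdbool.h>
#include <stdio.h>

#define IDX_FIXED 32			/* mirrors INTEL_PMC_IDX_FIXED */

struct model_pmc {
	int guest_idx;			/* counter type the guest programmed */
	int host_idx;			/* counter type perf actually assigned */
	bool guest_enabled;		/* from fixed_ctr_ctrl or eventsel */
};

/* Keep guest enable semantics while toggling the host-typed counter. */
static void sync_host_counter(const struct model_pmc *pmc)
{
	const char *action = pmc->guest_enabled ? "enable" : "disable";
	const char *host_type = pmc->host_idx >= IDX_FIXED ? "fixed" : "gp";
	const char *guest_type = pmc->guest_idx >= IDX_FIXED ? "fixed" : "gp";

	printf("guest %s ctr %d -> %s host %s ctr %d\n",
	       guest_type,
	       pmc->guest_idx >= IDX_FIXED ? pmc->guest_idx - IDX_FIXED
					   : pmc->guest_idx,
	       action, host_type,
	       pmc->host_idx >= IDX_FIXED ? pmc->host_idx - IDX_FIXED
					  : pmc->host_idx);
}

int main(void)
{
	/* Cross-mapped: guest fixed counter 0 backed by host GP counter 3. */
	struct model_pmc pmc = {
		.guest_idx = IDX_FIXED + 0,
		.host_idx = 3,
		.guest_enabled = true,
	};

	sync_host_counter(&pmc);
	return 0;
}

Running it prints "guest fixed ctr 0 -> enable host gp ctr 3", i.e. a
guest fixed counter being serviced by a host general-purpose counter,
which is exactly the case the new enable/disable helpers must handle.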