| Message ID | 20220905123946.95223-4-likexu@tencent.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | KVM: x86/svm/pmu: Add AMD Guest PerfMonV2 support |
On Mon, Sep 5, 2022 at 5:44 AM Like Xu <like.xu.linux@gmail.com> wrote:
>
> From: Like Xu <likexu@tencent.com>
>
> If AMD Performance Monitoring Version 2 (PerfMonV2) is detected
> by the guest, it can use a new scheme to manage the Core PMCs using
> the new global control and status registers.
>
> In addition to benefiting from the PerfMonV2 functionality in the same
> way as the host (higher precision), the guest can also reduce the number
> of vm-exits by lowering the total number of MSR accesses.
>
> In terms of implementation details, amd_is_valid_msr() is resurrected
> since the three newly added MSRs do not map to a single vPMC.
> The possibility of emulating PerfMonV2 on hosts that lack it has also
> been ruled out, for reasons of precision.
>
> Co-developed-by: Sandipan Das <sandipan.das@amd.com>
> Signed-off-by: Sandipan Das <sandipan.das@amd.com>
> Signed-off-by: Like Xu <likexu@tencent.com>
> ---
>  arch/x86/kvm/pmu.c     |  6 +++++
>  arch/x86/kvm/svm/pmu.c | 50 +++++++++++++++++++++++++++++++++---------
>  arch/x86/kvm/x86.c     | 11 ++++++++++
>  3 files changed, 57 insertions(+), 10 deletions(-)
>
[...]
> +static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
> +{
> +	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> +
> +	switch (msr) {
> +	case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
> +		return pmu->version > 0;
> +	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
> +		return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
> +	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
> +	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
> +	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
> +		return pmu->version > 1;
> +	default:
> +		if (msr > MSR_F15H_PERF_CTR5 &&
> +		    msr < MSR_F15H_PERF_CTL0 + 2 * KVM_AMD_PMC_MAX_GENERIC)
> +			return pmu->version > 1;

Should this be bounded by guest CPUID.80000022H:EBX[NumCorePmc]
(unless host-initiated)?

> +		break;
> +	}
> +
> +	return amd_msr_idx_to_pmc(vcpu, msr);
> +}

[...]
>  static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> +	struct kvm_cpuid_entry2 *entry;
> +	union cpuid_0x80000022_ebx ebx;
>
> -	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
> +	pmu->version = 1;
> +	entry = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0);
> +	if (kvm_pmu_cap.version > 1 && entry && (entry->eax & BIT(0))) {
> +		pmu->version = 2;
> +		ebx.full = entry->ebx;
> +		pmu->nr_arch_gp_counters = min3((unsigned int)ebx.split.num_core_pmc,
> +						(unsigned int)kvm_pmu_cap.num_counters_gp,
> +						(unsigned int)KVM_AMD_PMC_MAX_GENERIC);
> +		pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
> +		pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask;
> +	} else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
>  		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;

The logic above doesn't seem quite right, since guest_cpuid_has(vcpu,
X86_FEATURE_PERFCTR_CORE) promises 6 PMCs, regardless of what
CPUID.80000022 says.
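Both of the review comments above hinge on what CPUID leaf 0x80000022 advertises: EAX[0] is the PerfMonV2 feature bit and EBX[3:0] (NumCorePmc) is the counter count, the same two fields amd_pmu_refresh() consumes. For reference, a standalone user-space sketch of that decode; the union mirrors the patch's cpuid_0x80000022_ebx, but this program and its names are otherwise illustrative, not kernel code:

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>	/* gcc/clang __get_cpuid() helper, x86 only */

/* Mirrors the low bits of the kernel's union cpuid_0x80000022_ebx. */
union amd_ext_pmu_ebx {
	struct {
		uint32_t num_core_pmc:4;	/* EBX[3:0]: NumCorePmc */
		uint32_t reserved:28;
	} split;
	uint32_t full;
};

int main(void)
{
	uint32_t eax, ebx, ecx, edx;
	union amd_ext_pmu_ebx b;

	/* Returns 0 if the leaf is above the maximum extended leaf. */
	if (!__get_cpuid(0x80000022, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID.80000022H not present: no PerfMonV2");
		return 1;
	}

	b.full = ebx;
	/* EAX bit 0 is the same bit amd_pmu_refresh() tests via BIT(0). */
	printf("PerfMonV2: %u, NumCorePmc: %u\n",
	       eax & 1, b.split.num_core_pmc);
	return 0;
}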
On 6/9/2022 2:00 am, Jim Mattson wrote:
> On Mon, Sep 5, 2022 at 5:44 AM Like Xu <like.xu.linux@gmail.com> wrote:
[...]
>> +	default:
>> +		if (msr > MSR_F15H_PERF_CTR5 &&
>> +		    msr < MSR_F15H_PERF_CTL0 + 2 * KVM_AMD_PMC_MAX_GENERIC)
>> +			return pmu->version > 1;
>
> Should this be bounded by guest CPUID.80000022H:EBX[NumCorePmc]
> (unless host-initiated)?

Indeed, how about:

	default:
		if (msr > MSR_F15H_PERF_CTR5 &&
		    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
			return pmu->version > 1;

and, for the host-initiated case:

#define MSR_F15H_PERF_MSR_MAX \
	(MSR_F15H_PERF_CTR0 + 2 * (KVM_AMD_PMC_MAX_GENERIC - 1))

kvm_{set|get}_msr_common():

	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_MSR_MAX:
		if (kvm_pmu_is_valid_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);

?

[...]
>> +	} else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
>>  		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
>
> The logic above doesn't seem quite right, since guest_cpuid_has(vcpu,
> X86_FEATURE_PERFCTR_CORE) promises 6 PMCs, regardless of what
> CPUID.80000022 says.

I would have expected CPUID.80000022 to override PERFCTR_CORE, but like
you I no longer think that is a good idea, so how about:

amd_pmu_refresh():

	bool perfctr_core = guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);

	pmu->version = 1;
	if (kvm_pmu_cap.version > 1)
		entry = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0);

	if (!perfctr_core)
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
	if (entry && (entry->eax & BIT(0))) {
		pmu->version = 2;
		ebx.full = entry->ebx;
		pmu->nr_arch_gp_counters = min3((unsigned int)ebx.split.num_core_pmc,
						(unsigned int)kvm_pmu_cap.num_counters_gp,
						(unsigned int)KVM_AMD_PMC_MAX_GENERIC);
	}
	/* PERFCTR_CORE promises 6 PMCs, regardless of CPUID.80000022 */
	if (perfctr_core) {
		pmu->nr_arch_gp_counters = max(pmu->nr_arch_gp_counters,
					       AMD64_NUM_COUNTERS_CORE);
	}

	if (pmu->version > 1) {
		pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
		pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask;
	}

?
On Tue, Sep 6, 2022 at 5:45 AM Like Xu <like.xu.linux@gmail.com> wrote:
>
> On 6/9/2022 2:00 am, Jim Mattson wrote:
> > Should this be bounded by guest CPUID.80000022H:EBX[NumCorePmc]
> > (unless host-initiated)?
>
> Indeed, how about:
>
> 	default:
> 		if (msr > MSR_F15H_PERF_CTR5 &&
> 		    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
> 			return pmu->version > 1;
>
> and, for the host-initiated case:
>
> #define MSR_F15H_PERF_MSR_MAX \
> 	(MSR_F15H_PERF_CTR0 + 2 * (KVM_AMD_PMC_MAX_GENERIC - 1))

I think there may be an off-by-one error here.

> kvm_{set|get}_msr_common():
>
> 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_MSR_MAX:
> 		if (kvm_pmu_is_valid_msr(vcpu, msr))
> 			return kvm_pmu_set_msr(vcpu, msr_info);
>
> ?

[...]
> 	/* PERFCTR_CORE promises 6 PMCs, regardless of CPUID.80000022 */
> 	if (perfctr_core) {
> 		pmu->nr_arch_gp_counters = max(pmu->nr_arch_gp_counters,
> 					       AMD64_NUM_COUNTERS_CORE);
> 	}

Even if X86_FEATURE_PERFCTR_CORE is clear, all AMD CPUs promise 4 PMCs.
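At this point the thread has three constraints in play: a baseline of 4 PMCs on every AMD CPU, 6 when PERFCTR_CORE is set, and a PerfMonV2 count from NumCorePmc clamped by host and KVM limits. A standalone sketch of how they compose; the constants are copied from the kernel sources so the sketch builds on its own, but the helper itself is illustrative and not the eventual upstream code:

#include <stdio.h>

/* Values from the kernel sources, defined here so the sketch stands alone. */
#define AMD64_NUM_COUNTERS		4	/* legacy MSR_K7_* counters   */
#define AMD64_NUM_COUNTERS_CORE		6	/* PERFCTR_CORE counters      */
#define KVM_AMD_PMC_MAX_GENERIC		6	/* KVM's per-vCPU upper bound */

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;
	return m < c ? m : c;
}

/* Resolve the guest GP counter count from the constraints in the thread. */
static unsigned int resolve_nr_gp_counters(int perfctr_core, int perfmon_v2,
					   unsigned int num_core_pmc,
					   unsigned int host_counters_gp)
{
	/* Even if X86_FEATURE_PERFCTR_CORE is clear, AMD promises 4 PMCs. */
	unsigned int nr = AMD64_NUM_COUNTERS;

	/* PERFCTR_CORE promises 6 PMCs, regardless of CPUID.80000022. */
	if (perfctr_core)
		nr = AMD64_NUM_COUNTERS_CORE;

	/*
	 * PerfMonV2's NumCorePmc, clamped by host and KVM limits, can only
	 * raise the count; it never lowers it below the promises above.
	 */
	if (perfmon_v2) {
		unsigned int v2 = min3u(num_core_pmc, host_counters_gp,
					KVM_AMD_PMC_MAX_GENERIC);
		if (v2 > nr)
			nr = v2;
	}
	return nr;
}

int main(void)
{
	/* PerfMonV2 guest advertising 6 counters on a 6-counter host. */
	printf("%u\n", resolve_nr_gp_counters(1, 1, 6, 6));	/* -> 6 */
	/* PERFCTR_CORE guest whose CPUID.80000022 claims only 2. */
	printf("%u\n", resolve_nr_gp_counters(1, 1, 2, 6));	/* -> 6 */
	return 0;
}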
On 7/9/2022 4:19 am, Jim Mattson wrote:
> On Tue, Sep 6, 2022 at 5:45 AM Like Xu <like.xu.linux@gmail.com> wrote:
>>
>> #define MSR_F15H_PERF_MSR_MAX \
>> 	(MSR_F15H_PERF_CTR0 + 2 * (KVM_AMD_PMC_MAX_GENERIC - 1))
>
> I think there may be an off-by-one error here.

If KVM_AMD_PMC_MAX_GENERIC is 6:

#define MSR_F15H_PERF_CTL		0xc0010200
#define MSR_F15H_PERF_CTL5		(MSR_F15H_PERF_CTL + 10)

#define MSR_F15H_PERF_CTR		0xc0010201
#define MSR_F15H_PERF_CTR0		MSR_F15H_PERF_CTR
#define MSR_F15H_PERF_CTR5		(MSR_F15H_PERF_CTR + 10)

>> kvm_{set|get}_msr_common():
>>
>> 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_MSR_MAX:

The original code is "case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:";
in that case, MSR_F15H_PERF_MSR_MAX makes sense, right?

>> 		if (kvm_pmu_is_valid_msr(vcpu, msr))
>> 			return kvm_pmu_set_msr(vcpu, msr_info);
>> ?
[...]
On Tue, Sep 6, 2022 at 8:50 PM Like Xu <like.xu.linux@gmail.com> wrote:
>
> On 7/9/2022 4:19 am, Jim Mattson wrote:
> > I think there may be an off-by-one error here.
>
> If KVM_AMD_PMC_MAX_GENERIC is 6:
>
> #define MSR_F15H_PERF_CTL		0xc0010200
> #define MSR_F15H_PERF_CTL5		(MSR_F15H_PERF_CTL + 10)
>
> #define MSR_F15H_PERF_CTR		0xc0010201
> #define MSR_F15H_PERF_CTR0		MSR_F15H_PERF_CTR
> #define MSR_F15H_PERF_CTR5		(MSR_F15H_PERF_CTR + 10)
>
[...]
> The original code is "case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:";
> in that case, MSR_F15H_PERF_MSR_MAX makes sense, right?

Right. I was misreading the definition.
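The resolution is easy to verify: the Fam15h CTL/CTR MSRs interleave, with CTLn at 0xc0010200 + 2*n and CTRn at 0xc0010201 + 2*n, so for KVM_AMD_PMC_MAX_GENERIC == 6 the proposed macro evaluates to 0xc001020b, which is exactly MSR_F15H_PERF_CTR5. A compile-time check of that arithmetic (MSR_F15H_PERF_MSR_MAX is the macro proposed in this thread, not an existing definition):

#define KVM_AMD_PMC_MAX_GENERIC	6

#define MSR_F15H_PERF_CTL	0xc0010200
#define MSR_F15H_PERF_CTL0	MSR_F15H_PERF_CTL
#define MSR_F15H_PERF_CTL5	(MSR_F15H_PERF_CTL + 10)

#define MSR_F15H_PERF_CTR	0xc0010201
#define MSR_F15H_PERF_CTR0	MSR_F15H_PERF_CTR
#define MSR_F15H_PERF_CTR5	(MSR_F15H_PERF_CTR + 10)

/* Proposed in this thread; covers CTL0..CTR5 when the max is 6. */
#define MSR_F15H_PERF_MSR_MAX \
	(MSR_F15H_PERF_CTR0 + 2 * (KVM_AMD_PMC_MAX_GENERIC - 1))

/*
 * CTRn sits 2*n above CTR0, so the "max" MSR lands on the last counter:
 * 0xc0010201 + 2 * 5 == 0xc001020b == MSR_F15H_PERF_CTR5, no off-by-one.
 */
_Static_assert(MSR_F15H_PERF_MSR_MAX == MSR_F15H_PERF_CTR5,
	       "MSR_F15H_PERF_MSR_MAX is exactly MSR_F15H_PERF_CTR5");

int main(void) { return 0; }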
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 7002e1b74108..56b4f898a246 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -455,12 +455,15 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 	switch (msr) {
 	case MSR_CORE_PERF_GLOBAL_STATUS:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
 		msr_info->data = pmu->global_status;
 		return 0;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
 		msr_info->data = pmu->global_ctrl;
 		return 0;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
 		msr_info->data = 0;
 		return 0;
 	default:
@@ -479,12 +482,14 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 	switch (msr) {
 	case MSR_CORE_PERF_GLOBAL_STATUS:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
 		if (msr_info->host_initiated) {
 			pmu->global_status = data;
 			return 0;
 		}
 		break; /* RO MSR */
 	case MSR_CORE_PERF_GLOBAL_CTRL:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
 		if (pmu->global_ctrl == data)
 			return 0;
 		if (kvm_valid_perf_global_ctrl(pmu, data)) {
@@ -495,6 +500,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
 		if (!(data & pmu->global_ovf_ctrl_mask)) {
 			if (!msr_info->host_initiated)
 				pmu->global_status &= ~data;
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 3a20972e9f1a..4c7d408e3caa 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -92,12 +92,6 @@ static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
 }
 
-static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
-{
-	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
-	return false;
-}
-
 static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -109,6 +103,29 @@ static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
 	return pmc;
 }
 
+static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
+	switch (msr) {
+	case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
+		return pmu->version > 0;
+	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
+		return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+		return pmu->version > 1;
+	default:
+		if (msr > MSR_F15H_PERF_CTR5 &&
+		    msr < MSR_F15H_PERF_CTL0 + 2 * KVM_AMD_PMC_MAX_GENERIC)
+			return pmu->version > 1;
+		break;
+	}
+
+	return amd_msr_idx_to_pmc(vcpu, msr);
+}
+
 static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -162,20 +179,31 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct kvm_cpuid_entry2 *entry;
+	union cpuid_0x80000022_ebx ebx;
 
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+	pmu->version = 1;
+	entry = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0);
+	if (kvm_pmu_cap.version > 1 && entry && (entry->eax & BIT(0))) {
+		pmu->version = 2;
+		ebx.full = entry->ebx;
+		pmu->nr_arch_gp_counters = min3((unsigned int)ebx.split.num_core_pmc,
+						(unsigned int)kvm_pmu_cap.num_counters_gp,
+						(unsigned int)KVM_AMD_PMC_MAX_GENERIC);
+		pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
+		pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask;
+	} else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
 		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
-	else
+	} else {
 		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
+	}
 
 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
 	pmu->reserved_bits = 0xfffffff000280000ull;
 	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
-	pmu->version = 1;
 	/* not applicable to AMD; but clean them to prevent any fall out */
 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
 	pmu->nr_arch_fixed_counters = 0;
-	pmu->global_status = 0;
 	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
 }
 
@@ -206,6 +234,8 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 		pmc_stop_counter(pmc);
 		pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
 	}
+
+	pmu->global_ctrl = pmu->global_status = 0;
 }
 
 struct kvm_pmu_ops amd_pmu_ops __initdata = {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9738efd8425..96bb01c5eab8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1424,6 +1424,11 @@ static const u32 msrs_to_save_all[] = {
 	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
 	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
 	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+
+	MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
+	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
+	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+
 	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
 	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
 	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
@@ -3856,6 +3861,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_DS_AREA:
 	case MSR_PEBS_DATA_CFG:
 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
 		if (kvm_pmu_is_valid_msr(vcpu, msr))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 		/*
@@ -3959,6 +3967,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_DS_AREA:
 	case MSR_PEBS_DATA_CFG:
 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
 			return kvm_pmu_get_msr(vcpu, msr_info);
 		/*
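For a sense of where the commit message's vm-exit savings come from: with PerfMonV2, a guest can gate every core PMC through the single global-control MSR and collect/clear overflow state through the global status pair, instead of touching each PERF_CTLn individually (one intercepted MSR access, and hence one vm-exit, per counter). A guest-kernel-flavored sketch, illustrative only; it assumes the MSR_AMD64_PERF_CNTR_GLOBAL_* constants from <asm/msr-index.h> and a counter count taken from CPUID.80000022H:EBX, and the function itself is hypothetical:

#include <linux/types.h>
#include <asm/msr.h>		/* wrmsrl()/rdmsrl() */
#include <asm/msr-index.h>	/* MSR_AMD64_PERF_CNTR_GLOBAL_* */

/*
 * Sketch: stop all core PMCs, read and clear the overflow status, then
 * restart counting, using only the PerfMonV2 global MSRs. Without
 * PerfMonV2 the stop/start halves alone would need one wrmsr per
 * PERF_CTLn; here each step is a single global MSR access.
 */
static u64 pmu_handle_overflow(unsigned int nr_counters)
{
	u64 enable_mask = (1ULL << nr_counters) - 1;
	u64 status;

	/* One write disables every counter at once. */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);

	/* Which counters overflowed? */
	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);

	/* Write-1-to-clear the observed overflow bits. */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);

	/* Re-enable all counters with a single write. */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, enable_mask);

	return status;
}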