diff mbox series

KVM: x86/pmu: Update pmu->pebs_enable_mask with actual counter_mask

Message ID 20220526085723.91292-1-likexu@tencent.com (mailing list archive)
State New, archived
Headers show
Series KVM: x86/pmu: Update pmu->pebs_enable_mask with actual counter_mask | expand

Commit Message

Like Xu May 26, 2022, 8:57 a.m. UTC
From: Like Xu <likexu@tencent.com>

The blamed commit was posted before PEBS support was merged, but was
applied after that merge landed. Fix this by making pebs_enable_mask
depend on a new reusable counter_mask instead of the (at that point
still zero-initialized) global_ctrl.

Fixes: 94e05293f839 ("KVM: x86/pmu: Don't overwrite the pmu->global_ctrl when refreshing")
Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

Comments

Paolo Bonzini May 26, 2022, 10:16 a.m. UTC | #1
On 5/26/22 10:57, Like Xu wrote:
> Subject:
> [PATCH] KVM: x86/pmu: Update pmu->pebs_enable_mask with actual counter_mask
> From:
> Like Xu <like.xu.linux@gmail.com>
> Date:
> 5/26/22, 10:57
> 
> To:
> Paolo Bonzini <pbonzini@redhat.com>
> CC:
> Yanfei Xu <yanfei.xu@intel.com>, Sean Christopherson 
> <seanjc@google.com>, Vitaly Kuznetsov <vkuznets@redhat.com>, Wanpeng Li 
> <wanpengli@tencent.com>, Jim Mattson <jmattson@google.com>, Joerg Roedel 
> <joro@8bytes.org>, kvm@vger.kernel.org, linux-kernel@vger.kernel.org
> 
> 
> From: Like Xu<likexu@tencent.com>
> 
> The blamed commit is posted before the PEBS merge in, but is applied after
> the latter is merged in. Fix dependency of pebs_enable_mask on
> a new reusable counter_mask instead of zero-initialized global_ctrl.
> 
> Fixes: 94e05293f839 ("KVM: x86/pmu: Don't overwrite the pmu->global_ctrl when refreshing")
> Signed-off-by: Like Xu<likexu@tencent.com>
> ---
>   arch/x86/kvm/vmx/pmu_intel.c | 6 ++++--
>   1 file changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index ddf837130d1f..72bbcb3f9f8a 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -621,6 +621,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>   	struct kvm_cpuid_entry2 *entry;
>   	union cpuid10_eax eax;
>   	union cpuid10_edx edx;
> +	u64 counter_mask;
>   	int i;
>   
>   	pmu->nr_arch_gp_counters = 0;
> @@ -672,8 +673,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>   
>   	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
>   		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
> -	pmu->global_ctrl_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
> +	counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
>   		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
> +	pmu->global_ctrl_mask = counter_mask;
>   	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
>   			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
>   			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
> @@ -713,7 +715,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>   	if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT) {
>   		vcpu->arch.ia32_misc_enable_msr &= ~MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
>   		if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_BASELINE) {
> -			pmu->pebs_enable_mask = ~pmu->global_ctrl;
> +			pmu->pebs_enable_mask = counter_mask;
>   			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
>   			for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
>   				pmu->fixed_ctr_ctrl_mask &=
> -- 2.36.1
> 

Squashed, thanks.

Paolo
diff mbox series

Patch

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index ddf837130d1f..72bbcb3f9f8a 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -621,6 +621,7 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	struct kvm_cpuid_entry2 *entry;
 	union cpuid10_eax eax;
 	union cpuid10_edx edx;
+	u64 counter_mask;
 	int i;
 
 	pmu->nr_arch_gp_counters = 0;
@@ -672,8 +673,9 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
 		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
-	pmu->global_ctrl_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
+	counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
+	pmu->global_ctrl_mask = counter_mask;
 	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
 			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
 			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
@@ -713,7 +715,7 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT) {
 		vcpu->arch.ia32_misc_enable_msr &= ~MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
 		if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_BASELINE) {
-			pmu->pebs_enable_mask = ~pmu->global_ctrl;
+			pmu->pebs_enable_mask = counter_mask;
 			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
 			for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
 				pmu->fixed_ctr_ctrl_mask &=