[v2,04/15] KVM: PMU: disable LBR handling if architectural LBR is available

Message ID 20221125040604.5051-5-weijiang.yang@intel.com
State New, archived
Series Introduce Architectural LBR for vPMU

Commit Message

Yang, Weijiang Nov. 25, 2022, 4:05 a.m. UTC
From: Paolo Bonzini <pbonzini@redhat.com>

Traditional LBR is absent on CPU models that have architectural LBR, so
disable all processing of traditional LBR MSRs if they are not there.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

Comments

Sean Christopherson Jan. 27, 2023, 8:10 p.m. UTC | #1
On Thu, Nov 24, 2022, Yang Weijiang wrote:
> From: Paolo Bonzini <pbonzini@redhat.com>
> 
> Traditional LBR is absent on CPU models that have architectural LBR, so
> disable all processing of traditional LBR MSRs if they are not there.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---
>  arch/x86/kvm/vmx/pmu_intel.c | 32 ++++++++++++++++++++++----------
>  1 file changed, 22 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index e5cec07ca8d9..905673228932 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -170,19 +170,23 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
>  static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
>  {
>  	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
> -	bool ret = false;
>  
>  	if (!intel_pmu_lbr_is_enabled(vcpu))
> -		return ret;
> +		return false;
>  
> -	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
> -		(index >= records->from && index < records->from + records->nr) ||
> -		(index >= records->to && index < records->to + records->nr);
> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&

IIUC, the MSRs flat out don't exist _and_ KVM expects to passthrough MSRs to the
guest, i.e. KVM should check host support, not guest support.  Probably a moot
point from a functionality perspective since KVM shouldn't allow legacy LBRs to
be enabled for the guest, but from a performance perspective, checking guest CPUID
is slooow.
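
A minimal sketch of the host-capability variant (an illustration only, assuming
boot_cpu_has() as the check; the final form is whatever the follow-up settles on):

	/* Legacy LBR MSRs only exist when the host itself lacks arch LBRs. */
	if (!boot_cpu_has(X86_FEATURE_ARCH_LBR) &&
	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
		return true;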

That brings me to point #2, which is that KVM needs to disallow enabling legacy
LBRs on CPUs that support arch LBRs.  Again, IIUC, because KVM doesn't have the
option to fallback to legacy LBRs, that restriction needs to be treated as a bug
fix.  I'll post a separate patch unless my understanding is wrong.
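
One possible shape of that restriction, purely as a sketch (the placement in the
PERF_CAPABILITIES write path is an assumption, not the actual separate patch):

	/*
	 * Hypothetical guard: refuse a legacy LBR format on an arch-LBR
	 * host, since the legacy LBR MSRs don't exist to pass through.
	 */
	if (boot_cpu_has(X86_FEATURE_ARCH_LBR) && (data & PMU_CAP_LBR_FMT))
		return 1;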

> +	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
> +		return true;
>  
> -	if (!ret && records->info)
> -		ret = (index >= records->info && index < records->info + records->nr);
> +	if ((index >= records->from && index < records->from + records->nr) ||
> +	    (index >= records->to && index < records->to + records->nr))
> +		return true;
>  
> -	return ret;
> +	if (records->info && index >= records->info &&
> +	    index < records->info + records->nr)
> +		return true;
> +
> +	return false;
>  }
>  
>  static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
> @@ -702,6 +706,9 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
>  			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
>  	}
>  
> +	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))

Similar to above, I really don't want to query guest CPUID in the VM-Enter path.
If we establish the rule that LBRs can be enabled if and only if the correct type
is enabled (traditional/legacy vs. arch), then this can simply check host support.
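
Under that rule, the check here could likewise become a host-capability test
(again a sketch, not the committed form):

	/*
	 * The legacy MSRs don't exist on arch-LBR hosts; nothing to
	 * intercept or pass through here.
	 */
	if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
		return;

	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);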

> +		return;
> +
>  	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
>  	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
>  }
> @@ -742,10 +749,12 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
>  	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
> +	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
> +		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);

Unnecessary guest CPUID lookup and VMCS read, i.e. this can be deferred to the
!lbr_desc->event path.

>  
>  	if (!lbr_desc->event) {
>  		vmx_disable_lbr_msrs_passthrough(vcpu);
> -		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
> +		if (lbr_enable)
>  			goto warn;
>  		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
>  			goto warn;
> @@ -768,7 +777,10 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
>  
>  static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
>  {
> -	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
> +	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
> +		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
> +
> +	if (!lbr_enable)
>  		intel_pmu_release_guest_lbr_event(vcpu);
>  }
>  
> -- 
> 2.27.0
>
Yang, Weijiang Jan. 30, 2023, 8:10 a.m. UTC | #2
On 1/28/2023 4:10 AM, Sean Christopherson wrote:
> On Thu, Nov 24, 2022, Yang Weijiang wrote:
>> From: Paolo Bonzini <pbonzini@redhat.com>
>>
>> Traditional LBR is absent on CPU models that have architectural LBR, so
>> disable all processing of traditional LBR MSRs if they are not there.
>>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
>> ---
>>   arch/x86/kvm/vmx/pmu_intel.c | 32 ++++++++++++++++++++++----------
>>   1 file changed, 22 insertions(+), 10 deletions(-)
>>
>> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
>> index e5cec07ca8d9..905673228932 100644
>> --- a/arch/x86/kvm/vmx/pmu_intel.c
>> +++ b/arch/x86/kvm/vmx/pmu_intel.c
>> @@ -170,19 +170,23 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
>>   static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
>>   {
>>   	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
>> -	bool ret = false;
>>   
>>   	if (!intel_pmu_lbr_is_enabled(vcpu))
>> -		return ret;
>> +		return false;
>>   
>> -	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
>> -		(index >= records->from && index < records->from + records->nr) ||
>> -		(index >= records->to && index < records->to + records->nr);
>> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
> IIUC, the MSRs flat out don't exist _and_ KVM expects to passthrough MSRs to the
> guest, i.e. KVM should check host support, not guest support.  Probably a moot
> point from a functionality perspective since KVM shouldn't allow legacy LBRs to
> be enabled for the guest, but from a performance perspective, checking guest CPUID
> is slooow.


OK, I'll change the check.


>
> That brings me to point #2, which is that KVM needs to disallow enabling legacy
> LBRs on CPUs that support arch LBRs.  Again, IIUC, because KVM doesn't have the
> option to fallback to legacy LBRs,


Legacy LBR and arch LBR are mutually exclusive on all platforms: old
platforms have legacy LBR, and new platforms, e.g., SPR, have arch LBR,
so there's no fallback logic.


> that restriction needs to be treated as a bug
> fix.  I'll post a separate patch unless my understanding is wrong.
>
>> +	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
>> +		return true;
>>   
>> -	if (!ret && records->info)
>> -		ret = (index >= records->info && index < records->info + records->nr);
>> +	if ((index >= records->from && index < records->from + records->nr) ||
>> +	    (index >= records->to && index < records->to + records->nr))
>> +		return true;
>>   
>> -	return ret;
>> +	if (records->info && index >= records->info &&
>> +	    index < records->info + records->nr)
>> +		return true;
>> +
>> +	return false;
>>   }
>>   
>>   static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
>> @@ -702,6 +706,9 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
>>   			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
>>   	}
>>   
>> +	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
> Similar to above, I really don't want to query guest CPUID in the VM-Enter path.
> If we establish the rule that LBRs can be enabled if and only if the correct type
> is enabled (traditional/legacy vs. arch), then this can simply check host support.

I understand your concerns; I'll look for a more efficient way to
check the guest's arch LBR support.


>
>> +		return;
>> +
>>   	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
>>   	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
>>   }
>> @@ -742,10 +749,12 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
>>   {
>>   	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
>>   	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
>> +	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
>> +		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
> Unnecessary guest CPUID lookup and VMCS read, i.e. this can be deferred to the
> !lbr_desc->event path.

OK


>
>>   
>>   	if (!lbr_desc->event) {
>>   		vmx_disable_lbr_msrs_passthrough(vcpu);
>> -		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
>> +		if (lbr_enable)
>>   			goto warn;
>>   		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
>>   			goto warn;
>> @@ -768,7 +777,10 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
>>   
>>   static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
>>   {
>> -	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
>> +	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
>> +		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
>> +
>> +	if (!lbr_enable)
>>   		intel_pmu_release_guest_lbr_event(vcpu);
>>   }
>>   
>> -- 
>> 2.27.0
>>

Patch

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index e5cec07ca8d9..905673228932 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -170,19 +170,23 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 {
 	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
-	bool ret = false;
 
 	if (!intel_pmu_lbr_is_enabled(vcpu))
-		return ret;
+		return false;
 
-	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
-		(index >= records->from && index < records->from + records->nr) ||
-		(index >= records->to && index < records->to + records->nr);
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
+		return true;
 
-	if (!ret && records->info)
-		ret = (index >= records->info && index < records->info + records->nr);
+	if ((index >= records->from && index < records->from + records->nr) ||
+	    (index >= records->to && index < records->to + records->nr))
+		return true;
 
-	return ret;
+	if (records->info && index >= records->info &&
+	    index < records->info + records->nr)
+		return true;
+
+	return false;
 }
 
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
@@ -702,6 +706,9 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
 	}
 
+	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+		return;
+
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
 }
@@ -742,10 +749,12 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
 
 	if (!lbr_desc->event) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
-		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
+		if (lbr_enable)
 			goto warn;
 		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
 			goto warn;
@@ -768,7 +777,10 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 
 static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
 {
-	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
+
+	if (!lbr_enable)
 		intel_pmu_release_guest_lbr_event(vcpu);
 }