
[v10,08/16] KVM: x86/pmu: Refactor code to support guest Arch LBR

Message ID 20220422075509.353942-9-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Introduce Architectural LBR for vPMU

Commit Message

Yang, Weijiang April 22, 2022, 7:55 a.m. UTC
Take Arch LBR into account when doing the sanity checks before
programming the vPMU for the guest. Pass the Arch LBR recording MSRs
through to the guest for better performance. Note, Arch LBR and legacy
LBR support are mutually exclusive, i.e., they're never both available
on the same platform.

Co-developed-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 37 +++++++++++++++++++++++++++++-------
 arch/x86/kvm/vmx/vmx.c       |  3 +++
 2 files changed, 33 insertions(+), 7 deletions(-)
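
As context for the hunks below: with Arch LBR, the guest's LBR enable
bit moves from IA32_DEBUGCTL.LBR to IA32_LBR_CTL.LBREN, which is what
the ternaries added to vmx_passthrough_lbr_msrs() and
intel_pmu_cleanup() encode. A minimal sketch of that logic as a
standalone helper (hypothetical guest_lbr_enabled(), not part of this
patch):

	static bool guest_lbr_enabled(struct kvm_vcpu *vcpu)
	{
		/* Arch LBR guests signal enablement via IA32_LBR_CTL.LBREN. */
		if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
			return vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN;

		/* Legacy LBR guests use IA32_DEBUGCTL.LBR. */
		return vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR;
	}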

Comments

Liang, Kan April 28, 2022, 2:18 p.m. UTC | #1
On 4/22/2022 3:55 AM, Yang Weijiang wrote:
> Take Arch LBR into account when doing the sanity checks before
> programming the vPMU for the guest. Pass the Arch LBR recording MSRs
> through to the guest for better performance. Note, Arch LBR and legacy
> LBR support are mutually exclusive, i.e., they're never both available
> on the same platform.
> 
> Co-developed-by: Like Xu <like.xu@linux.intel.com>
> Signed-off-by: Like Xu <like.xu@linux.intel.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---
>   arch/x86/kvm/vmx/pmu_intel.c | 37 +++++++++++++++++++++++++++++-------
>   arch/x86/kvm/vmx/vmx.c       |  3 +++
>   2 files changed, 33 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index 7dc8a5783df7..cb28888e9f4f 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -170,12 +170,16 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
>   
>   bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
>   {
> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
> +		return guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
> +
>   	/*
>   	 * As a first step, a guest could only enable LBR feature if its
>   	 * cpu model is the same as the host because the LBR registers
>   	 * would be pass-through to the guest and they're model specific.
>   	 */
> -	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
> +	return !boot_cpu_has(X86_FEATURE_ARCH_LBR) &&
> +		boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
>   }
>   
>   bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
> @@ -193,12 +197,19 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)

I think we should move MSR_ARCH_LBR_DEPTH and MSR_ARCH_LBR_CTL to this
function as well, since they are LBR-related MSRs.
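
A minimal sketch of that move, assuming the new checks stay gated on
the guest's Arch LBR CPUID bit (assumed shape, not part of the posted
patch):

	/* The Arch LBR control MSRs are LBR MSRs too. */
	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
		ret = (index == MSR_ARCH_LBR_DEPTH) ||
		      (index == MSR_ARCH_LBR_CTL);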

>   	if (!intel_pmu_lbr_is_enabled(vcpu))
>   		return ret;
>   
> -	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
> -		(index >= records->from && index < records->from + records->nr) ||
> -		(index >= records->to && index < records->to + records->nr);
> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
> +		ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS);
> +
> +	if (!ret) {
> +		ret = (index >= records->from &&
> +		       index < records->from + records->nr) ||
> +		      (index >= records->to &&
> +		       index < records->to + records->nr);
> +	}
>   
>   	if (!ret && records->info)
> -		ret = (index >= records->info && index < records->info + records->nr);
> +		ret = (index >= records->info &&
> +		       index < records->info + records->nr);

Please use "{}" since you split it across two lines.
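
That is, presumably:

	if (!ret && records->info) {
		ret = (index >= records->info &&
		       index < records->info + records->nr);
	}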

Thanks,
Kan
> [...]
Yang, Weijiang April 29, 2022, 5:45 a.m. UTC | #2
On 4/28/2022 10:18 PM, Liang, Kan wrote:
>
> On 4/22/2022 3:55 AM, Yang Weijiang wrote:
>> Take Arch LBR into account when doing the sanity checks before
>> programming the vPMU for the guest. Pass the Arch LBR recording MSRs
>> through to the guest for better performance. Note, Arch LBR and legacy
>> LBR support are mutually exclusive, i.e., they're never both available
>> on the same platform.
>>
>> Co-developed-by: Like Xu <like.xu@linux.intel.com>
>> Signed-off-by: Like Xu <like.xu@linux.intel.com>
>> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
>> ---
>>    arch/x86/kvm/vmx/pmu_intel.c | 37 +++++++++++++++++++++++++++++-------
>>    arch/x86/kvm/vmx/vmx.c       |  3 +++
>>    2 files changed, 33 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
>> index 7dc8a5783df7..cb28888e9f4f 100644
>> --- a/arch/x86/kvm/vmx/pmu_intel.c
>> +++ b/arch/x86/kvm/vmx/pmu_intel.c
>> @@ -170,12 +170,16 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
>>    
>>    bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
>>    {
>> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
>> +		return guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
>> +
>>    	/*
>>    	 * As a first step, a guest could only enable LBR feature if its
>>    	 * cpu model is the same as the host because the LBR registers
>>    	 * would be pass-through to the guest and they're model specific.
>>    	 */
>> -	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
>> +	return !boot_cpu_has(X86_FEATURE_ARCH_LBR) &&
>> +		boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
>>    }
>>    
>>    bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
>> @@ -193,12 +197,19 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
> I think we should move MSR_ARCH_LBR_DEPTH and MSR_ARCH_LBR_CTL to this
> function as well, since they are LBR-related MSRs.
Makes sense, will change it in next version.
>
>>    	if (!intel_pmu_lbr_is_enabled(vcpu))
>>    		return ret;
>>    
>> -	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
>> -		(index >= records->from && index < records->from + records->nr) ||
>> -		(index >= records->to && index < records->to + records->nr);
>> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
>> +		ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS);
>> +
>> +	if (!ret) {
>> +		ret = (index >= records->from &&
>> +		       index < records->from + records->nr) ||
>> +		      (index >= records->to &&
>> +		       index < records->to + records->nr);
>> +	}
>>    
>>    	if (!ret && records->info)
>> -		ret = (index >= records->info && index < records->info + records->nr);
>> +		ret = (index >= records->info &&
>> +		       index < records->info + records->nr);
> Please use "{}" since you split it across two lines.
OK.
>
> Thanks,
> Kan
>> [...]

Patch

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 7dc8a5783df7..cb28888e9f4f 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -170,12 +170,16 @@  static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
 
 bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
 {
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
+		return guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
+
 	/*
 	 * As a first step, a guest could only enable LBR feature if its
 	 * cpu model is the same as the host because the LBR registers
 	 * would be pass-through to the guest and they're model specific.
 	 */
-	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
+	return !boot_cpu_has(X86_FEATURE_ARCH_LBR) &&
+		boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
 }
 
 bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
@@ -193,12 +197,19 @@  static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 	if (!intel_pmu_lbr_is_enabled(vcpu))
 		return ret;
 
-	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
-		(index >= records->from && index < records->from + records->nr) ||
-		(index >= records->to && index < records->to + records->nr);
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+		ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS);
+
+	if (!ret) {
+		ret = (index >= records->from &&
+		       index < records->from + records->nr) ||
+		      (index >= records->to &&
+		       index < records->to + records->nr);
+	}
 
 	if (!ret && records->info)
-		ret = (index >= records->info && index < records->info + records->nr);
+		ret = (index >= records->info &&
+		       index < records->info + records->nr);
 
 	return ret;
 }
@@ -747,6 +758,9 @@  static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
 	}
 
+	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+		return;
+
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
 }
@@ -787,10 +801,13 @@  void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
+		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
 
 	if (!lbr_desc->event) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
-		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
+		if (lbr_enable)
 			goto warn;
 		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
 			goto warn;
@@ -807,13 +824,19 @@  void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 	return;
 
 warn:
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
+		wrmsrl(MSR_ARCH_LBR_DEPTH, lbr_desc->records.nr);
 	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
 		vcpu->vcpu_id);
 }
 
 static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
 {
-	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
+		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
+
+	if (!lbr_enable)
 		intel_pmu_release_guest_lbr_event(vcpu);
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 73961fcfb62d..a1816c6597f5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -573,6 +573,9 @@  static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
+	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
 		return true;
 	}