[v5,5/8] KVM: x86: Track supported PERF_CAPABILITIES in kvm_caps

Message ID 20221006000314.73240-6-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: x86: Intel LBR related perf cleanups

Commit Message

Sean Christopherson Oct. 6, 2022, 12:03 a.m. UTC
Track KVM's supported PERF_CAPABILITIES in kvm_caps instead of computing
the supported capabilities on the fly every time.  Using kvm_caps will
also allow for future cleanups as the kvm_caps values can be used
directly in common x86 code.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/svm.c          |  2 ++
 arch/x86/kvm/vmx/capabilities.h | 25 ------------------------
 arch/x86/kvm/vmx/pmu_intel.c    |  2 +-
 arch/x86/kvm/vmx/vmx.c          | 34 +++++++++++++++++++++++++++++----
 arch/x86/kvm/x86.h              |  1 +
 5 files changed, 34 insertions(+), 30 deletions(-)
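
For context, a minimal illustrative sketch (not part of this patch) of the "future cleanups" the commit message alludes to: with the supported mask cached in kvm_caps by vendor code at module init, common x86 code could validate a guest write to MSR_IA32_PERF_CAPABILITIES directly against kvm_caps.supported_perf_cap instead of calling into vendor-specific helpers. The helper name below is hypothetical and used only for this example:

	/*
	 * Hypothetical helper, illustration only (assumes arch/x86/kvm/x86.h
	 * is included so kvm_caps is visible): reject any PERF_CAPABILITIES
	 * bit that vendor code did not advertise as supported.
	 */
	static bool kvm_perf_cap_valid(u64 data)
	{
		return !(data & ~kvm_caps.supported_perf_cap);
	}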

Comments

Like Xu Oct. 14, 2022, 9:25 a.m. UTC | #1
On 6/10/2022 8:03 am, Sean Christopherson wrote:
> Track KVM's supported PERF_CAPABILITIES in kvm_caps instead of computing
> the supported capabilities on the fly every time.  Using kvm_caps will
> also allow for future cleanups as the kvm_caps values can be used
> directly in common x86 code.
> 
> Signed-off-by: Sean Christopherson <seanjc@google.com>

A similar change was made to one of my git branches. Please move forward.

Acked-by: Like Xu <likexu@tencent.com>


Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 58f0077d9357..6b680b249975 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2715,6 +2715,7 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
 			msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
 		break;
 	case MSR_IA32_PERF_CAPABILITIES:
+		msr->data = kvm_caps.supported_perf_cap;
 		return 0;
 	default:
 		return KVM_MSR_RET_INVALID;
@@ -4898,6 +4899,7 @@ static __init void svm_set_cpu_caps(void)
 {
 	kvm_set_cpu_caps();
 
+	kvm_caps.supported_perf_cap = 0;
 	kvm_caps.supported_xss = 0;
 
 	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 479124e49bbd..cd2ac9536c99 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -395,31 +395,6 @@ static inline bool vmx_pebs_supported(void)
 	return boot_cpu_has(X86_FEATURE_PEBS) && kvm_pmu_cap.pebs_ept;
 }
 
-static inline u64 vmx_get_perf_capabilities(void)
-{
-	u64 perf_cap = PMU_CAP_FW_WRITES;
-	struct x86_pmu_lbr lbr;
-	u64 host_perf_cap = 0;
-
-	if (!enable_pmu)
-		return 0;
-
-	if (boot_cpu_has(X86_FEATURE_PDCM))
-		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
-
-	x86_perf_get_lbr(&lbr);
-	if (lbr.nr)
-		perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
-
-	if (vmx_pebs_supported()) {
-		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
-		if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
-			perf_cap &= ~PERF_CAP_PEBS_BASELINE;
-	}
-
-	return perf_cap;
-}
-
 static inline bool cpu_has_notify_vmexit(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 25b70a85bef5..24f4c22691f8 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -631,7 +631,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 		pmu->fixed_counters[i].current_config = 0;
 	}
 
-	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
+	vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
 	lbr_desc->records.nr = 0;
 	lbr_desc->event = NULL;
 	lbr_desc->msr_passthrough = false;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e70ac91cd2fb..850ff6e683d1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1850,7 +1850,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
 			return 1;
 		return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
 	case MSR_IA32_PERF_CAPABILITIES:
-		msr->data = vmx_get_perf_capabilities();
+		msr->data = kvm_caps.supported_perf_cap;
 		return 0;
 	default:
 		return KVM_MSR_RET_INVALID;
@@ -2029,7 +2029,7 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated
 	    (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
 		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
 
-	if ((vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) &&
+	if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
 	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
 		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
 
@@ -2342,14 +2342,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		if (data & PMU_CAP_LBR_FMT) {
 			if ((data & PMU_CAP_LBR_FMT) !=
-			    (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT))
+			    (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
 				return 1;
 			if (!cpuid_model_is_consistent(vcpu))
 				return 1;
 		}
 		if (data & PERF_CAP_PEBS_FORMAT) {
 			if ((data & PERF_CAP_PEBS_MASK) !=
-			    (vmx_get_perf_capabilities() & PERF_CAP_PEBS_MASK))
+			    (kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK))
 				return 1;
 			if (!guest_cpuid_has(vcpu, X86_FEATURE_DS))
 				return 1;
@@ -7669,6 +7669,31 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vmx_update_exception_bitmap(vcpu);
 }
 
+static u64 vmx_get_perf_capabilities(void)
+{
+	u64 perf_cap = PMU_CAP_FW_WRITES;
+	struct x86_pmu_lbr lbr;
+	u64 host_perf_cap = 0;
+
+	if (!enable_pmu)
+		return 0;
+
+	if (boot_cpu_has(X86_FEATURE_PDCM))
+		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
+
+	x86_perf_get_lbr(&lbr);
+	if (lbr.nr)
+		perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
+
+	if (vmx_pebs_supported()) {
+		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
+		if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
+			perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+	}
+
+	return perf_cap;
+}
+
 static __init void vmx_set_cpu_caps(void)
 {
 	kvm_set_cpu_caps();
@@ -7691,6 +7716,7 @@ static __init void vmx_set_cpu_caps(void)
 
 	if (!enable_pmu)
 		kvm_cpu_cap_clear(X86_FEATURE_PDCM);
+	kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
 
 	if (!enable_sgx) {
 		kvm_cpu_cap_clear(X86_FEATURE_SGX);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 829d3134c1eb..9de72586f406 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -27,6 +27,7 @@ struct kvm_caps {
 	u64 supported_mce_cap;
 	u64 supported_xcr0;
 	u64 supported_xss;
+	u64 supported_perf_cap;
 };
 
 void kvm_spurious_fault(void);