[RESEND,v2,7/8] KVM: x86/svm/pmu: Direct access pmu->gp_counters[] to implement amd_*_to_pmc()

Message ID 20220823093221.38075-8-likexu@tencent.com
State New, archived
Series x86/pmu: Corner cases fixes and optimization

Commit Message

Like Xu Aug. 23, 2022, 9:32 a.m. UTC
From: Like Xu <likexu@tencent.com>

AMD only has gp counters, whose corresponding vPMCs are initialised
and stored in pmu->gp_counters[] in order of idx, so we can access this
array directly based on any valid pmc->idx, without any help from other
interfaces at all. The amd_rdpmc_ecx_to_pmc() can now reuse this part
of the code quite naturally.

Opportunistically apply array_index_nospec() to reduce the attack
surface for speculative execution and remove the dead code.

Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/kvm/svm/pmu.c | 41 +++++------------------------------------
 1 file changed, 5 insertions(+), 36 deletions(-)
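
For reference, the bounds-check-plus-array_index_nospec() idiom applied in
amd_pmc_idx_to_pmc() below can be illustrated by the following standalone
sketch (the read_counter() helper and its names are hypothetical, not part
of the patch):

#include <linux/nospec.h>
#include <linux/types.h>

static u64 read_counter(const u64 *counters, unsigned int nr,
			unsigned int idx)
{
	/* Architectural bounds check, visible to the caller. */
	if (idx >= nr)
		return 0;

	/*
	 * array_index_nospec() clamps idx to [0, nr) even under
	 * speculative execution, so a mispredicted bounds check cannot
	 * be used to read out of bounds and leak data through a cache
	 * side channel.
	 */
	return counters[array_index_nospec(idx, nr)];
}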

Comments

Sean Christopherson Aug. 30, 2022, 6:07 p.m. UTC | #1
On Tue, Aug 23, 2022, Like Xu wrote:
> From: Like Xu <likexu@tencent.com>
> 
> AMD only has gp counters, whose corresponding vPMCs are initialised
> and stored in pmu->gp_counters[] in order of idx, so we can access this

Avoid pronouns, and state what the patch is doing, not what it _can_ do.  IIUC:

  Access PMU counters on AMD by directly indexing the array of general
  purpose counters instead of translating the PMC index to an MSR index.
  AMD only supports gp counters, so there's no need to translate a PMC index
  to an MSR index and back to a PMC index.

> array directly based on any valid pmc->idx, without any help from other
> interfaces at all. The amd_rdpmc_ecx_to_pmc() can now reuse this part
> of the code quite naturally.
> 
> Opportunistically apply array_index_nospec() to reduce the attack
> surface for speculative execution and remove the dead code.
> 
> Signed-off-by: Like Xu <likexu@tencent.com>

Patch

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index e9c66dd659a6..e57eb0555a04 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -33,23 +33,6 @@ enum index {
 	INDEX_ERROR,
 };
 
-static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
-{
-	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
-		if (type == PMU_TYPE_COUNTER)
-			return MSR_F15H_PERF_CTR;
-		else
-			return MSR_F15H_PERF_CTL;
-	} else {
-		if (type == PMU_TYPE_COUNTER)
-			return MSR_K7_PERFCTR0;
-		else
-			return MSR_K7_EVNTSEL0;
-	}
-}
-
 static enum index msr_to_index(u32 msr)
 {
 	switch (msr) {
@@ -141,18 +124,12 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
 
 static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 {
-	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
-	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+	unsigned int num_counters = pmu->nr_arch_gp_counters;
 
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
-		/*
-		 * The idx is contiguous. The MSRs are not. The counter MSRs
-		 * are interleaved with the event select MSRs.
-		 */
-		pmc_idx *= 2;
-	}
+	if (pmc_idx >= num_counters)
+		return NULL;
 
-	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
+	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
 }
 
 static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
@@ -168,15 +145,7 @@ static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	unsigned int idx, u64 *mask)
 {
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *counters;
-
-	idx &= ~(3u << 30);
-	if (idx >= pmu->nr_arch_gp_counters)
-		return NULL;
-	counters = pmu->gp_counters;
-
-	return &counters[idx];
+	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
 }
 
 static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
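
The idx & ~(3u << 30) in amd_rdpmc_ecx_to_pmc() strips the flag bits a
guest may set in ECX[31:30] when executing RDPMC (Intel defines bit 30 to
select fixed-function counters; AMD has only gp counters and ignores both
bits), leaving just the counter index for the range check in
amd_pmc_idx_to_pmc(). A minimal sketch of the masking, with hypothetical
names not taken from the patch:

/*
 * ECX[31:30] are RDPMC flag bits (bit 30 selects fixed-function
 * counters on Intel); AMD has no fixed counters, so only ECX[29:0]
 * is treated as the counter index.
 */
#define RDPMC_FLAG_BITS	(3u << 30)

static unsigned int rdpmc_counter_index(unsigned int ecx)
{
	return ecx & ~RDPMC_FLAG_BITS;
}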