KVM: x86/pmu: Disable vPMU if EVENTSEL_GUESTONLY bit doesn't exist

Message ID: 20230307113819.34089-1-likexu@tencent.com (mailing list archive)
State: New, archived

Commit Message

Like Xu March 7, 2023, 11:38 a.m. UTC
From: Like Xu <likexu@tencent.com>

Unlike Intel's atomic MSR-switch mechanism, AMD supports the guest PMU's
basic counter feature by setting the GUESTONLY bit on the host, so the
presence or absence of this bit determines whether the vPMU is emulatable
(e.g. in nested virtualization). Since writing reserved bits of an
EVENTSEL register does not raise #GP on AMD, KVM needs to update the
global enable_pmu value by checking whether the GUESTONLY bit sticks
when written.

Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/kvm/svm/svm.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)


base-commit: 13738a3647368f7f600b30d241779bcd2a3ebbfd

Comments

Sean Christopherson April 7, 2023, 2:06 a.m. UTC | #1
On Tue, Mar 07, 2023, Like Xu wrote:
> From: Like Xu <likexu@tencent.com>
> 
> Unlike Intel's atomic MSR-switch mechanism, AMD supports the guest PMU's
> basic counter feature by setting the GUESTONLY bit on the host, so the
> presence or absence of this bit determines whether the vPMU is emulatable
> (e.g. in nested virtualization). Since writing reserved bits of an
> EVENTSEL register does not raise #GP on AMD, KVM needs to update the
> global enable_pmu value by checking whether the GUESTONLY bit sticks
> when written.
> 
> Signed-off-by: Like Xu <likexu@tencent.com>
> ---
>  arch/x86/kvm/svm/svm.c | 15 ++++++++++++++-
>  1 file changed, 14 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index dd21e8b1a259..f41d96e638ef 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4866,6 +4866,16 @@ static __init void svm_adjust_mmio_mask(void)
>  	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
>  }
>  
> +static __init bool pmu_has_guestonly_mode(void)
> +{
> +	u64 value;
> +
> +	wrmsrl(MSR_F15H_PERF_CTL0, AMD64_EVENTSEL_GUESTONLY);
> +	rdmsrl(MSR_F15H_PERF_CTL0, value);

Preemption needs to be disabled to ensure a stable CPU.  I gotta imagine KVM should
be restoring the original value too.

I'm guessing I'm not going to like the answer, but is there really no better way
to probe for GUESTONLY support?
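
A minimal sketch of a probe that would address both review points, pinning
the write/read-back pair to one CPU and restoring the original MSR value
afterwards (illustrative only; not code from this series, nor necessarily
what was eventually applied):

static __init bool pmu_has_guestonly_mode(void)
{
	u64 original, value;

	/* Disable preemption so the write and read-back hit the same CPU. */
	preempt_disable();

	/* Save the current EVENTSEL so the probe leaves no trace behind. */
	rdmsrl(MSR_F15H_PERF_CTL0, original);
	wrmsrl(MSR_F15H_PERF_CTL0, AMD64_EVENTSEL_GUESTONLY);
	rdmsrl(MSR_F15H_PERF_CTL0, value);
	wrmsrl(MSR_F15H_PERF_CTL0, original);

	preempt_enable();

	/* GUESTONLY is supported iff the written bit sticks. */
	return value == AMD64_EVENTSEL_GUESTONLY;
}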

> +
> +	return value == AMD64_EVENTSEL_GUESTONLY;
> +}
> +
>  static __init void svm_set_cpu_caps(void)
>  {
>  	kvm_set_cpu_caps();
> @@ -4911,8 +4921,11 @@ static __init void svm_set_cpu_caps(void)
>  		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
>  
>  	/* AMD PMU PERFCTR_CORE CPUID */
> -	if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
> +	if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE) &&
> +	    pmu_has_guestonly_mode())
>  		kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);
> +	else
> +		enable_pmu = false;

Why does lack of AMD64_EVENTSEL_GUESTONLY disable the PMU, but if and only if
X86_FEATURE_PERFCTR_CORE?  E.g. why does the behavior not also apply to legacy
perfmon support?
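
For illustration, a hypothetical restructuring that decouples the GUESTONLY
probe from PERFCTR_CORE and makes it gate the vPMU as a whole, along the
lines the question implies (a sketch against this patch, not code from the
series):

	/*
	 * Hypothetical: require GUESTONLY for any AMD vPMU support,
	 * independent of which counter flavor the CPU advertises.
	 */
	if (enable_pmu && !pmu_has_guestonly_mode())
		enable_pmu = false;

	/* AMD PMU PERFCTR_CORE CPUID */
	if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);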

>  
>  	/* CPUID 0x8000001F (SME/SEV features) */
>  	sev_set_cpu_caps();
> 
> base-commit: 13738a3647368f7f600b30d241779bcd2a3ebbfd
> -- 
> 2.39.2
>

Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index dd21e8b1a259..f41d96e638ef 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4866,6 +4866,16 @@ static __init void svm_adjust_mmio_mask(void)
 	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
 }
 
+static __init bool pmu_has_guestonly_mode(void)
+{
+	u64 value;
+
+	wrmsrl(MSR_F15H_PERF_CTL0, AMD64_EVENTSEL_GUESTONLY);
+	rdmsrl(MSR_F15H_PERF_CTL0, value);
+
+	return value == AMD64_EVENTSEL_GUESTONLY;
+}
+
 static __init void svm_set_cpu_caps(void)
 {
 	kvm_set_cpu_caps();
@@ -4911,8 +4921,11 @@ static __init void svm_set_cpu_caps(void)
 		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
 
 	/* AMD PMU PERFCTR_CORE CPUID */
-	if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+	if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE) &&
+	    pmu_has_guestonly_mode())
 		kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);
+	else
+		enable_pmu = false;
 
 	/* CPUID 0x8000001F (SME/SEV features) */
 	sev_set_cpu_caps();