
[2/3] KVM: x86/pmu: Restrict advanced features based on module enable_pmu

Message ID 20220601031925.59693-2-likexu@tencent.com (mailing list archive)
State New, archived
Series [1/3] KVM: x86/pmu: Accept 0 for absent PMU MSRs when host-initiated if !enable_pmu

Commit Message

Like Xu June 1, 2022, 3:19 a.m. UTC
From: Like Xu <likexu@tencent.com>

Once vPMU is disabled, KVM will not expose the following features:
PEBS (by clearing kvm_pmu_cap.pebs_ept), legacy LBR and ARCH_LBR,
the CPUID 0xA leaf, the PDCM bit and MSR_IA32_PERF_CAPABILITIES, and
PT_MODE_HOST_GUEST mode.

What these features have in common is that their use relies on the
underlying PMU counters, with host perf_event serving as the back-end
resource, or on sharing part of the IRQ delivery path.

Signed-off-by: Like Xu <likexu@tencent.com>
---
Follow up: a pmu_disable kvm-unit-test will be proposed later.
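
As a rough idea of what such a pmu_disable test might assert, here is a
standalone guest-side sketch using raw CPUID rather than the actual
kvm-unit-tests harness; it assumes the guest was started with the host
kvm module parameter enable_pmu=N and is not part of this patch.

	/* Guest-side sketch: check that PMU-related CPUID bits are hidden. */
	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID.0AH:EAX[7:0] is the architectural PMU version ID. */
		if (__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
			printf("PMU version: %u (expect 0)\n", eax & 0xff);

		/* CPUID.01H:ECX[15] is PDCM (IA32_PERF_CAPABILITIES present). */
		if (__get_cpuid(0x1, &eax, &ebx, &ecx, &edx))
			printf("PDCM: %u (expect 0)\n", (ecx >> 15) & 1);

		return 0;
	}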

 arch/x86/kvm/pmu.h              | 6 ++++--
 arch/x86/kvm/vmx/capabilities.h | 6 +++++-
 arch/x86/kvm/vmx/vmx.c          | 7 +++++--
 3 files changed, 14 insertions(+), 5 deletions(-)

Patch

diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index d59e1cb3b5dc..8fbce2bc06d9 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -167,9 +167,11 @@  static inline void kvm_init_pmu_capability(void)
 	  * For Intel, only support guest architectural pmu
 	  * on a host with architectural pmu.
 	  */
-	if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp) {
-		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
+	if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
 		enable_pmu = false;
+
+	if (!enable_pmu) {
+		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
 		return;
 	}
 
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index dc2cb8a16e76..96d025483b7b 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -7,6 +7,7 @@ 
 #include "lapic.h"
 #include "x86.h"
 #include "pmu.h"
+#include "cpuid.h"
 
 extern bool __read_mostly enable_vpid;
 extern bool __read_mostly flexpriority_enabled;
@@ -415,6 +416,9 @@  static inline u64 vmx_get_perf_capabilities(void)
 	u64 perf_cap = PMU_CAP_FW_WRITES;
 	u64 host_perf_cap = 0;
 
+	if (!enable_pmu)
+		return 0;
+
 	if (boot_cpu_has(X86_FEATURE_PDCM))
 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
 
@@ -426,7 +430,7 @@  static inline u64 vmx_get_perf_capabilities(void)
 			perf_cap &= ~PERF_CAP_PEBS_BASELINE;
 	}
 
-	if (boot_cpu_has(X86_FEATURE_ARCH_LBR) && !cpu_has_vmx_arch_lbr())
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) && !cpu_has_vmx_arch_lbr())
 		perf_cap &= ~PMU_CAP_LBR_FMT;
 
 	return perf_cap;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6927f6e8ec31..11bad594fedd 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7575,11 +7575,14 @@  static __init void vmx_set_cpu_caps(void)
 		kvm_cpu_cap_check_and_set(X86_FEATURE_DS);
 		kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
 	}
-	if (!cpu_has_vmx_arch_lbr()) {
+	if (!enable_pmu || !cpu_has_vmx_arch_lbr()) {
 		kvm_cpu_cap_clear(X86_FEATURE_ARCH_LBR);
 		supported_xss &= ~XFEATURE_MASK_LBR;
 	}
 
+	if (!enable_pmu)
+		kvm_cpu_cap_clear(X86_FEATURE_PDCM);
+
 	if (!enable_sgx) {
 		kvm_cpu_cap_clear(X86_FEATURE_SGX);
 		kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
@@ -8269,7 +8272,7 @@  static __init int hardware_setup(void)
 
 	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
 		return -EINVAL;
-	if (!enable_ept || !cpu_has_vmx_intel_pt())
+	if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
 		pt_mode = PT_MODE_SYSTEM;
 	if (pt_mode == PT_MODE_HOST_GUEST)
 		vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;