[7/7] KVM: selftests: Test Intel counters' bit width emulation

Message ID 20230323072714.82289-8-likexu@tencent.com (mailing list archive)
State New, archived
Series KVM: selftests: Test the consistency of the PMU's CPUID and its features

Commit Message

Like Xu March 23, 2023, 7:27 a.m. UTC
From: Like Xu <likexu@tencent.com>

Add tests to cover Intel counters' bit-width emulation. When the guest has
the FW_WRITES bit set in IA32_PERF_CAPABILITIES, the bit widths of the GP
and fixed counters are specified by CPUID (no less than 32 bits and no
greater than the host bit width), and writing bits outside that width
generates #GP. Without FW_WRITES, only the sign-extended low 32 bits of
the written value take effect, and naturally no #GP is generated (see the
illustrative sketch after the diffstat).

Co-developed-by: Jinrong Liang <cloudliang@tencent.com>
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
Signed-off-by: Like Xu <likexu@tencent.com>
---
 .../selftests/kvm/x86_64/pmu_cpuid_test.c     | 105 ++++++++++++++++++
 1 file changed, 105 insertions(+)
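
To make the intended semantics concrete, the guest-visible behavior
described above might look like the following sketch for a hypothetical
48-bit counter (illustrative only, not part of the patch; the MSR names
and bit helpers are the standard kernel ones):

/*
 * Illustrative only: assume CPUID.0AH reports a 48-bit GP counter width,
 * i.e. CPUID.0AH:EAX[23:16] == 48.
 */

/* With FW_WRITES, the full-width alias MSR_IA32_PMC0 accepts bits [47:0];
 * setting any bit at or above the reported width raises #GP.
 */
wrmsr(MSR_IA32_PMC0, BIT_ULL(47) | 0x1234);	/* accepted as-is */
wrmsr(MSR_IA32_PMC0, BIT_ULL(48));		/* #GP: beyond the bit width */

/* Without FW_WRITES, the legacy MSR_IA32_PERFCTR0 takes only the low
 * 32 bits of the written value, sign-extended from bit 31, and never #GPs.
 */
wrmsr(MSR_IA32_PERFCTR0, BIT_ULL(48) | BIT_ULL(31));
/* rdmsr(MSR_IA32_PERFCTR0) & GENMASK_ULL(47, 0) == 0xffff80000000ull */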

Comments

Sean Christopherson May 24, 2023, 10:52 p.m. UTC | #1
On Thu, Mar 23, 2023, Like Xu wrote:
> +static uint64_t test_ctrs_bit_width_setup(struct kvm_vcpu *vcpu,
> +					  uint8_t bit_width,
> +					  uint64_t perf_cap,
> +					  uint32_t msr_base)
> +{
> +	struct kvm_cpuid_entry2 *entry;
> +	bool fw_wr = perf_cap & PMU_CAP_FW_WRITES;
> +	uint64_t kvm_width;
> +	uint64_t value;
> +
> +	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
> +	if (msr_base != MSR_CORE_PERF_FIXED_CTR0) {
> +		kvm_width = kvm_gp_ctr_bit_width();
> +		entry->eax = (entry->eax & ~GP_WIDTH_MASK) |
> +			(bit_width << GP_WIDTH_OFS_BIT);
> +	} else {
> +		kvm_width = kvm_fixed_ctr_bit_width();
> +		entry->edx = (entry->edx & ~FIXED_WIDTH_MASK) |
> +			(bit_width << FIXED_WIDTH_OFS_BIT);
> +	}
> +	TEST_REQUIRE(kvm_width > 31);

Unfortunately, using TEST_REQUIRE() in a subtest is generally a bad idea.  This
will skip _all_ tests if the requirement isn't met.  That might be a signal that
the test is doing too much, i.e. should be split into multiple tests.  Unlike KUT,
selftests are more geared towards lots of small tests, not a handful of massive
tests.
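
For illustration, the split suggested here could look something like a
standalone test binary (a hypothetical sketch, not code from this series;
the kvm_*_ctr_bit_width() helpers are the ones added by the patch, and
host_cpu_is_intel/kvm_cpu_has() are existing selftests facilities):

/*
 * Hypothetical standalone bit-width test: TEST_REQUIRE() failures now
 * skip only this test, not every PMU subtest sharing a single main().
 */
int main(int argc, char *argv[])
{
	TEST_REQUIRE(host_cpu_is_intel);
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));

	/* Check the width requirement once, up front, instead of inside
	 * the per-iteration setup helper.
	 */
	TEST_REQUIRE(kvm_gp_ctr_bit_width() > 31);
	TEST_REQUIRE(kvm_fixed_ctr_bit_width() > 31);

	intel_test_counters_bit_width();

	return 0;
}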

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
index caf0d98079c7..e7465b01178a 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
@@ -29,6 +29,10 @@ 
 #define INTEL_PMC_IDX_FIXED 32
 #define RDPMC_FIXED_BASE BIT_ULL(30)
 #define FIXED_CTR_NUM_MASK GENMASK_ULL(4, 0)
+#define GP_WIDTH_OFS_BIT 16
+#define GP_WIDTH_MASK GENMASK_ULL(23, GP_WIDTH_OFS_BIT)
+#define FIXED_WIDTH_OFS_BIT 5
+#define FIXED_WIDTH_MASK GENMASK_ULL(12, FIXED_WIDTH_OFS_BIT)
 
 #define ARCH_EVENT(select, umask) (((select) & 0xff) | ((umask) & 0xff) << 8)
 
@@ -62,6 +66,16 @@  static const uint64_t perf_caps[] = {
  */
 #define MSR_INTEL_ARCH_PMU_GPCTR (MSR_IA32_PERFCTR0 + 2)
 
+static const uint32_t msr_bases[] = {
+	MSR_INTEL_ARCH_PMU_GPCTR,
+	MSR_IA32_PMC0,
+	MSR_CORE_PERF_FIXED_CTR0,
+};
+
+static const uint64_t bit_widths[] = {
+	0, 1, 31, 32, 47, 48, 63, 64,
+};
+
 static uint64_t evt_code_for_fixed_ctr(uint8_t idx)
 {
 	return arch_events[fixed_events[idx]];
@@ -99,6 +113,22 @@  static uint32_t kvm_max_pmu_version(void)
 	return kvm_entry->eax & PMU_VERSION_MASK;
 }
 
+static uint32_t kvm_gp_ctr_bit_width(void)
+{
+	const struct kvm_cpuid_entry2 *kvm_entry;
+
+	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
+	return (kvm_entry->eax & GP_WIDTH_MASK) >> GP_WIDTH_OFS_BIT;
+}
+
+static uint32_t kvm_fixed_ctr_bit_width(void)
+{
+	const struct kvm_cpuid_entry2 *kvm_entry;
+
+	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
+	return (kvm_entry->edx & FIXED_WIDTH_MASK) >> FIXED_WIDTH_OFS_BIT;
+}
+
 static struct kvm_vcpu *new_vcpu(void *guest_code)
 {
 	struct kvm_vm *vm;
@@ -381,6 +411,50 @@  static void test_pmu_version_setup(struct kvm_vcpu *vcpu, uint8_t version)
 	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
 }
 
+static uint64_t test_ctrs_bit_width_setup(struct kvm_vcpu *vcpu,
+					  uint8_t bit_width,
+					  uint64_t perf_cap,
+					  uint32_t msr_base)
+{
+	struct kvm_cpuid_entry2 *entry;
+	bool fw_wr = perf_cap & PMU_CAP_FW_WRITES;
+	uint64_t kvm_width;
+	uint64_t value;
+
+	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
+	if (msr_base != MSR_CORE_PERF_FIXED_CTR0) {
+		kvm_width = kvm_gp_ctr_bit_width();
+		entry->eax = (entry->eax & ~GP_WIDTH_MASK) |
+			(bit_width << GP_WIDTH_OFS_BIT);
+	} else {
+		kvm_width = kvm_fixed_ctr_bit_width();
+		entry->edx = (entry->edx & ~FIXED_WIDTH_MASK) |
+			(bit_width << FIXED_WIDTH_OFS_BIT);
+	}
+	TEST_REQUIRE(kvm_width > 31);
+
+	vcpu_set_cpuid(vcpu);
+	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, perf_cap);
+
+	/* No less than 32 bits and no greater than the host bitwidth */
+	bit_width = fw_wr ? max_t(int, 32, bit_width) : 32;
+	bit_width = min_t(int, bit_width, kvm_width);
+
+	/* Unconditionally set signed bit 31 for the case w/o FW_WRITES */
+	value = BIT_ULL(bit_width) | 0x1234567ull | BIT_ULL(31);
+	vcpu_args_set(vcpu, 4, msr_base, value, 1, 1);
+
+	if (fw_wr && msr_base != MSR_INTEL_ARCH_PMU_GPCTR) {
+		vm_install_exception_handler(vcpu->vm, GP_VECTOR,
+					     guest_gp_handler);
+		return GP_VECTOR;
+	} else if (msr_base == MSR_INTEL_ARCH_PMU_GPCTR) {
+		value = (s32)(value & (BIT_ULL(32) - 1));
+	}
+
+	return value & (BIT_ULL(bit_width) - 1);
+}
+
 static void intel_check_arch_event_is_unavl(uint8_t idx)
 {
 	const char *msg = "Unavailable arch event is counting.";
@@ -497,12 +571,43 @@  static void intel_test_pmu_version(void)
 	}
 }
 
+static void vcpu_run_bit_width(uint8_t bit_width, uint64_t perf_cap,
+			       uint32_t msr_base)
+{
+	const char *msg = "Fail to emulate counters' bit width.";
+	struct kvm_vcpu *vcpu;
+	uint64_t ret;
+
+	vcpu = new_vcpu(guest_wr_and_rd_msrs);
+	ret = test_ctrs_bit_width_setup(vcpu, bit_width, perf_cap, msr_base);
+	run_vcpu(vcpu, msg, first_uc_arg_equals, (void *)ret);
+	free_vcpu(vcpu);
+}
+
+static void intel_test_counters_bit_width(void)
+{
+	uint8_t i, j, k;
+
+	for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
+		for (j = 0; j < ARRAY_SIZE(msr_bases); j++) {
+			if (!(perf_caps[i] & PMU_CAP_FW_WRITES) &&
+			    msr_bases[j] == MSR_IA32_PMC0)
+				continue;
+
+			for (k = 0; k < ARRAY_SIZE(bit_widths); k++)
+				vcpu_run_bit_width(bit_widths[k], perf_caps[i],
+						   msr_bases[j]);
+		}
+	}
+}
+
 static void intel_test_pmu_cpuid(void)
 {
 	intel_test_arch_events();
 	intel_test_counters_num();
 	intel_test_fixed_counters();
 	intel_test_pmu_version();
+	intel_test_counters_bit_width();
 }
 
 int main(int argc, char *argv[])