@@ -49,11 +49,31 @@ static const uint64_t arch_events[] = {
/* Association of Fixed Counters with Architectural Performance Events */
static int fixed_events[] = {1, 0, 7};
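
+/* PERF_CAPABILITIES values to test: legacy writes (0) and full-width writes. */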
+static const uint64_t perf_caps[] = {
+	0,
+	PMU_CAP_FW_WRITES,
+};
+
+/*
+ * KVM handles writes to the first two non-existent counters
+ * (MSR_P6_PERFCTRx) via kvm_pr_unimpl_wrmsr() instead of injecting #GP.
+ * It is therefore acceptable to test the third counter here, as there
+ * are usually more than three GP counters available.
+ */
+#define MSR_INTEL_ARCH_PMU_GPCTR	(MSR_IA32_PERFCTR0 + 2)
+
static uint64_t evt_code_for_fixed_ctr(uint8_t idx)
{
	return arch_events[fixed_events[idx]];
}
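
+/* Number of GP counters KVM supports, per KVM_GET_SUPPORTED_CPUID leaf 0xa. */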
+static uint8_t kvm_gp_ctrs_num(void)
+{
+	const struct kvm_cpuid_entry2 *kvm_entry;
+
+	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
+	return (kvm_entry->eax & GP_CTR_NUM_MASK) >> GP_CTR_NUM_OFS_BIT;
+}
+
static struct kvm_vcpu *new_vcpu(void *guest_code)
{
	struct kvm_vm *vm;
@@ -98,6 +118,30 @@ static bool first_uc_arg_non_zero(struct ucall *uc, void *data)
	return uc->args[1];
}
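
+/* Ucall callback: true if the guest's first ucall argument equals @data. */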
+static bool first_uc_arg_equals(struct ucall *uc, void *data)
+{
+	return uc->args[1] == (uint64_t)data;
+}
+
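+/* On #GP, report the vector to the host and end the guest immediately. */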
+static void guest_gp_handler(struct ex_regs *regs)
+{
+	GUEST_SYNC(GP_VECTOR);
+	GUEST_DONE();
+}
+
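+/*
+ * Write @value to @offset MSRs starting at @base + @begin, syncing each
+ * value read back to the host.
+ */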
+static void guest_wr_and_rd_msrs(uint32_t base, uint64_t value,
+				 uint8_t begin, uint8_t offset)
+{
+	unsigned int i;
+
+	for (i = begin; i < begin + offset; i++) {
+		wrmsr(base + i, value);
+		GUEST_SYNC(rdmsr(base + i));
+	}
+
+	GUEST_DONE();
+}
+
static void intel_guest_run_arch_event(uint8_t version, uint8_t max_gp_num,
				       uint8_t max_fixed_num, bool supported,
				       uint32_t ctr_base_msr, uint64_t evt_code)
@@ -165,6 +209,27 @@ static void test_arch_events_setup(struct kvm_vcpu *vcpu, uint8_t evt_vector,
		      ctr_msr, arch_events[idx]);
}
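
+/*
+ * Advertise @eax_gp_num GP counters to the guest via CPUID.0xA and point
+ * the guest at the first counter index that should not exist.
+ */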
+static void test_oob_gp_counter_setup(struct kvm_vcpu *vcpu, uint8_t eax_gp_num,
+				      uint64_t perf_cap)
+{
+	struct kvm_cpuid_entry2 *entry;
+	uint32_t ctr_msr = MSR_IA32_PERFCTR0;
+
+	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
+	entry->eax = (entry->eax & ~GP_CTR_NUM_MASK) |
+		     (eax_gp_num << GP_CTR_NUM_OFS_BIT);
+	vcpu_set_cpuid(vcpu);
+
+	if (perf_cap & PMU_CAP_FW_WRITES)
+		ctr_msr = MSR_IA32_PMC0;
+
+	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, perf_cap);
+	vcpu_args_set(vcpu, 4, ctr_msr, 0xffff,
+		      min(eax_gp_num, kvm_gp_ctrs_num()), 1);
+
+	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
+}
+
static void intel_check_arch_event_is_unavl(uint8_t idx)
{
	const char *msg = "Unavailable arch event is counting.";
@@ -190,6 +255,42 @@ static void intel_check_arch_event_is_unavl(uint8_t idx)
	}
}

+/* Access the first out-of-range counter register to trigger #GP. */
+static void test_oob_gp_counter(uint8_t eax_gp_num, uint64_t perf_cap)
+{
+	const char *msg = "At least one unsupported GP counter is visible.";
+	struct kvm_vcpu *vcpu;
+
+	vcpu = new_vcpu(guest_wr_and_rd_msrs);
+	test_oob_gp_counter_setup(vcpu, eax_gp_num, perf_cap);
+	run_vcpu(vcpu, msg, first_uc_arg_equals, (void *)GP_VECTOR);
+	free_vcpu(vcpu);
+}
+
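+/* Probe GP counters beyond the CPUID-advertised and KVM-supported limits. */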
+static void intel_test_counters_num(void)
+{
+	uint8_t kvm_gp_num = kvm_gp_ctrs_num();
+	unsigned int i;
+
+	TEST_REQUIRE(kvm_gp_num > 2);
+
+	for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
+		/*
+		 * For compatibility reasons, KVM does not inject #GP when
+		 * the non-existent MSR_P6_PERFCTR[0|1] is accessed, but
+		 * this does not affect probing for MSR_IA32_PMCx with #GP.
+		 */
+		if (perf_caps[i] & PMU_CAP_FW_WRITES)
+			test_oob_gp_counter(0, perf_caps[i]);
+
+		test_oob_gp_counter(2, perf_caps[i]);
+		test_oob_gp_counter(kvm_gp_num, perf_caps[i]);
+
+		/* KVM never emulates more counters than it supports. */
+		test_oob_gp_counter(kvm_gp_num + 1, perf_caps[i]);
+	}
+}
+
static void intel_test_arch_events(void)
{
	uint8_t idx;
@@ -213,6 +314,7 @@ static void intel_test_arch_events(void)
static void intel_test_pmu_cpuid(void)
{
	intel_test_arch_events();
+	intel_test_counters_num();
}

int main(int argc, char *argv[])