[v3,4/5] selftests: kvm/x86: Add testing for inverted masked events

Message ID 20220709011726.1006267-5-aaronlewis@google.com
State New, archived
Series kvm: x86/pmu: Introduce and test masked events

Commit Message

Aaron Lewis July 9, 2022, 1:17 a.m. UTC
Add four tests for inverted masked events.

In the first two tests the guest event matches both the masked event
*and* the inverted masked event.  Matching the inverted event negates
the original match, so the guest event ends up not matching the
filter.  As a result, for an allow list the guest event will not be
programmed in the PMU, and for a deny list it will.

In the second two tests the opposite happens: the guest event matches
the masked event but not the inverted masked event, so the match
stands and the guest event matches the filter.  For the allow list
the guest event is programmed in the PMU; for the deny list it is
not.
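
For reference, the matching semantics the tests exercise can be
sketched as follows (hypothetical struct and helper names; the actual
userspace encoding is done by ENCODE_MASKED_EVENT() in the patch
below, and the in-kernel implementation differs):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Hypothetical decoded form of a masked event, mirroring the fields
 * that ENCODE_MASKED_EVENT() packs: event select, unit-mask mask,
 * unit-mask match, and the invert bit.
 */
struct masked_event {
	uint16_t select;
	uint8_t mask;
	uint8_t match;
	bool invert;
};

/*
 * An entry matches when the event selects agree and the guest's unit
 * mask, ANDed with the entry's mask, equals the entry's match, e.g.
 * mask == 0xff, match == 0 matches only unit mask 0, while mask == 0,
 * match == 0 matches any unit mask.
 */
static bool entry_matches(const struct masked_event *e,
			  uint16_t guest_select, uint8_t guest_unit_mask)
{
	return e->select == guest_select &&
	       (guest_unit_mask & e->mask) == e->match;
}

/*
 * A guest event matches the filter if it matches at least one
 * non-inverted entry and no inverted entry; matching an inverted
 * entry negates the match.
 */
static bool filter_matches(const struct masked_event *events, size_t n,
			   uint16_t guest_select, uint8_t guest_unit_mask)
{
	bool match = false;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!entry_matches(&events[i], guest_select,
				   guest_unit_mask))
			continue;
		if (events[i].invert)
			return false;
		match = true;
	}

	return match;
}

With KVM_PMU_EVENT_ALLOW a filter match means the guest event is
programmed; with KVM_PMU_EVENT_DENY it means the event is blocked,
which is what the expect_success()/expect_failure() pairs in the test
assert.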

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 .../kvm/x86_64/pmu_event_filter_test.c        | 69 +++++++++++++++++++
 1 file changed, 69 insertions(+)

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 29abe9c88f4f..95beec32d9eb 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -542,6 +542,74 @@ static void test_masked_events(struct kvm_vm *vm)
 	run_masked_events_tests(vm, masked_events, nmasked_events, event);
 }
 
+static uint64_t run_inverted_masked_events_test(struct kvm_vm *vm,
+						uint64_t masked_events[],
+						const int nmasked_events,
+						uint32_t action)
+{
+	struct kvm_pmu_event_filter *f;
+	uint64_t count;
+
+	f = create_pmu_event_filter(masked_events, nmasked_events, action,
+				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	count = test_with_filter(vm, f);
+	free(f);
+
+	return count;
+}
+
+static void test_inverted_masked_events(struct kvm_vm *vm)
+{
+	uint64_t masked_events[] = {
+		/*
+		 * Force the guest's unit mask to match the inverted masked
+		 * event by setting the match to the only valid unit mask
+		 * possible (0).
+		 * ie: guest_unit_mask & 0xff == 0.
+		 */
+		ENCODE_MASKED_EVENT(AMD_ZEN_BR_RETIRED, ~0x00, 0, 1),
+		ENCODE_MASKED_EVENT(INTEL_BR_RETIRED, ~0x00, 0, 1),
+		/*
+		 * Set the masked events to match any unit mask.
+		 * ie: guest_unit_mask & 0 == 0.
+		 */
+		ENCODE_MASKED_EVENT(AMD_ZEN_BR_RETIRED, 0, 0, 0),
+		ENCODE_MASKED_EVENT(INTEL_BR_RETIRED, 0, 0, 0),
+	};
+	const int nmasked_events = ARRAY_SIZE(masked_events);
+	uint64_t count;
+
+	count = run_inverted_masked_events_test(vm, masked_events,
+						nmasked_events,
+						KVM_PMU_EVENT_ALLOW);
+	expect_failure(count);
+
+	count = run_inverted_masked_events_test(vm, masked_events,
+						nmasked_events,
+						KVM_PMU_EVENT_DENY);
+	expect_success(count);
+
+	/*
+	 * Force the guest's unit mask to *not* match the inverted masked
+	 * event by setting the match to an invalid unit mask (1).
+	 * ie: guest_unit_mask & 0xff == 1.
+	 */
+	masked_events[0] =
+		ENCODE_MASKED_EVENT(AMD_ZEN_BR_RETIRED, ~0x00, 1, 1);
+	masked_events[1] =
+		ENCODE_MASKED_EVENT(INTEL_BR_RETIRED, ~0x00, 1, 1);
+
+	count = run_inverted_masked_events_test(vm, masked_events,
+						nmasked_events,
+						KVM_PMU_EVENT_ALLOW);
+	expect_success(count);
+
+	count = run_inverted_masked_events_test(vm, masked_events,
+						nmasked_events,
+						KVM_PMU_EVENT_DENY);
+	expect_failure(count);
+}
+
 int main(int argc, char *argv[])
 {
 	void (*guest_code)(void) = NULL;
@@ -587,6 +655,7 @@ int main(int argc, char *argv[])
 	test_not_member_allow_list(vm);
 
 	test_masked_events(vm);
+	test_inverted_masked_events(vm);
 
 	kvm_vm_free(vm);