
[kvm-unit-tests,3/3] x86: Add test coverage for the routing logic when exceptions occur in L2

Message ID 20211209182624.2316453-4-aaronlewis@google.com (mailing list archive)
State New, archived
Series Add additional testing for routing L2 exceptions

Commit Message

Aaron Lewis Dec. 9, 2021, 6:26 p.m. UTC
Add vmx_exception_test to ensure that exceptions that occur in L2 are
forwarded to the correct place.  When an exception occurs in L2, L1 or
L0 may want to get involved.  Test that the exception is routed to where
it is supposed to go.

The exceptions that are tested are: #GP, #UD, #DE, #DB, #BP, and #AC.
For each of these exceptions there are two main goals:
 1) Test that the exception is handled by L2.
 2) Test that the exception is handled by L1.
To test that this happens, each exception is triggered twice: once with
just an L2 exception handler registered, and again with both an L2
exception handler registered and L1's exception bitmap set.  The
expectation is that the first exception will be handled by L2 and the
second by L1.
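
Roughly, the knob being exercised is the VMCS exception bitmap; for each
vector the test does the equivalent of the following sketch, built from
the helpers used below ('vector' being the vector under test):

	/* Bit clear in the bitmap: L2's own handler fields the exception. */
	vmcs_write(EXC_BITMAP, vmcs_read(EXC_BITMAP) & ~(1u << vector));
	enter_guest();
	assert_exit_reason(VMX_VMCALL);		/* L2 handled it, then vmcall'd. */

	/* Bit set in the bitmap: the exception causes a VM-exit to L1. */
	vmcs_write(EXC_BITMAP, vmcs_read(EXC_BITMAP) | (1u << vector));
	enter_guest();
	assert_exit_reason(VMX_EXC_NMI);	/* Exit reason 0: exception/NMI. */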

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 x86/unittests.cfg |   7 ++
 x86/vmx_tests.c   | 211 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 218 insertions(+)

Comments

Sean Christopherson Dec. 9, 2021, 9:15 p.m. UTC | #1
On Thu, Dec 09, 2021, Aaron Lewis wrote:
> +static void vmx_exception_test_guest(void)
> +{
> +	handler old_gp = handle_exception(GP_VECTOR, vmx_exception_handler_gp);
> +	handler old_ud = handle_exception(UD_VECTOR, vmx_exception_handler_ud);
> +	handler old_de = handle_exception(DE_VECTOR, vmx_exception_handler_de);
> +	handler old_db = handle_exception(DB_VECTOR, vmx_exception_handler_db);
> +	handler old_bp = handle_exception(BP_VECTOR, vmx_exception_handler_bp);
> +	bool raised_vector = false;
> +	u64 old_cr0, old_rflags;
> +
> +	asm volatile (
> +		/* Return to L1 before starting the tests. */
> +		"vmcall\n\t"
> +
> +		/* #GP handled by L2*/
> +		"mov %[val], %%cr0\n\t"
> +		"vmx_exception_test_skip_gp:\n\t"
> +		"vmcall\n\t"
> +
> +		/* #GP handled by L1 */
> +		"mov %[val], %%cr0\n\t"

I would strongly prefer each of these be a standalone subtest in the sense that
each test starts from a clean state, configures the environment as needed, then
triggers the exception and checks the results.  I absolutely detest the tests
that string a bunch of scenarios together; they inevitably accrue subtle dependencies
between scenarios and are generally difficult/annoying to debug.

Having a gigantic asm blob is also unnecessary.  #GP can be generated with a
non-canonical access purely in C.  Ditto for #AC though that may or may not be
more readable.  #DE probably requires assembly to avoid compiler intervention.
#UD and #BP should be short and sweet.  E.g.
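
Untested, but in the same vein as the #GP guest below (names are just
placeholders):

static void vmx_ud_test_guest(void)
{
	/* UD2 architecturally raises #UD. */
	asm volatile("ud2");
}

static void vmx_bp_test_guest(void)
{
	/* INT3 raises #BP. */
	asm volatile("int3");
}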

It should be fairly straightforward to create a framework to handle running each
test, a la the vmx_tests array.  E.g. something like the below (completely untested).
This way there's no need to skip instructions, thus no need for exposing a bunch
of labels.  Each test is isolated, there's no code pairing between L0 and L1/L2, and
adding new tests or running a specific test is trivial.

static u8 vmx_exception_test_vector;

static void vmx_exception_handler(struct ex_regs *regs)
{
        report(regs->vector == vmx_exception_test_vector,
               "Handling %s in L2's exception handler",
               exception_mnemonic(vmx_exception_test_vector));
}

static void vmx_gp_test_guest(void)
{
	*(volatile u64 *)NONCANONICAL = 0;
}

static void handle_exception_in_l2(u8 vector)
{
	handler old_handler = handle_exception(vector, vmx_exception_handler);
	u32 old_eb = vmcs_read(EXC_BITMAP);

	vmx_exception_test_vector = vector;

	vmcs_write(EXC_BITMAP, old_eb & ~(1u << vector));

	enter_guest();
	report(vmcs_read(EXI_REASON) == VMX_VMCALL,
	       "%s handled by L2", exception_mnemonic(vector));

	vmcs_write(EXC_BITMAP, old_eb);
	handle_exception(vector, old_handler);
}

static void handle_exception_in_l1(u32 vector)
{
	u32 old_eb = vmcs_read(EXC_BITMAP);

	vmx_exception_test_vector = 0xff;

	vmcs_write(EXC_BITMAP, old_eb | (1u << vector));

	enter_guest();
	report((vmcs_read(EXI_REASON) == VMX_EXC_NMI) &&
	       ((vmcs_read(EXI_INTR_INFO) & 0xff) == vector),
	       "%s handled by L1", exception_mnemonic(vector));

	vmcs_write(EXC_BITMAP, old_eb);
}

struct vmx_exception_test {
	u8 vector;
	void (*guest_code)(void);
};

struct vmx_exception_test vmx_exception_tests[] = {
	{ GP_VECTOR, vmx_gp_test_guest },
};

static void vmx_exception_test(void)
{
	struct vmx_exception_test *t;
	int i;

	enter_guest();
	assert_exit_reason(VMX_VMCALL);
	skip_exit_insn();

	for (i = 0; i < ARRAY_SIZE(vmx_exception_tests); i++) {
		t = &vmx_exception_tests[i];

		test_set_guest(t->guest_code);

		handle_exception_in_l2(t->vector);
		handle_exception_in_l1(t->vector);
	}
}
Aaron Lewis Dec. 14, 2021, 1:19 a.m. UTC | #2
> Having a gigantic asm blob is also unnecessary.  #GP can be generated with a
> non-canonical access purely in C.  Ditto for #AC though that may or may not be
> more readable.  #DE probably requires assembly to avoid compiler intervention.

For #AC I'd prefer to leave this in ASM.  To get this to work in C I
had to trick the compiler into not optimizing the code away, and when
I was playing with it in Compiler Explorer, clang seemed to outsmart
my unaligned access by turning it into an aligned one, which defeated
the purpose.  For what I want, it seems more reliable to keep it in
ASM.
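
For reference, the kind of C version I was experimenting with looked
roughly like this (sketch only; in practice the compiler either
optimized the store away or managed to turn it into an aligned access):

static uint64_t usermode_callback(void)
{
	char buf[16] __attribute__((aligned(16)));

	/*
	 * Intended to be an 8-byte store at a 4-byte-aligned address,
	 * but nothing stops the compiler from eliding or realigning it.
	 */
	*(uint64_t *)(buf + 4) = 0;

	return 0;
}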

> #UD and #BP should be short and sweet.  E.g.
>
> It should be fairly straightforward to create a framework to handle running each
> test, a la the vmx_tests array.  E.g. something like the below (completely untested).
> This way there's no need to skip instructions, thus no need for exposing a bunch

Patch

diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 9fcdcae..0353b69 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -368,6 +368,13 @@  arch = x86_64
 groups = vmx nested_exception
 check = /sys/module/kvm_intel/parameters/allow_smaller_maxphyaddr=Y
 
+[vmx_exception_test]
+file = vmx.flat
+extra_params = -cpu max,+vmx -append vmx_exception_test
+arch = x86_64
+groups = vmx nested_exception
+timeout = 10
+
 [debug]
 file = debug.flat
 arch = x86_64
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 3d57ed6..4aafed9 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -21,6 +21,7 @@ 
 #include "smp.h"
 #include "delay.h"
 #include "access.h"
+#include "x86/usermode.h"
 
 #define VPID_CAP_INVVPID_TYPES_SHIFT 40
 
@@ -10701,6 +10702,215 @@  static void vmx_pf_vpid_test(void)
 	__vmx_pf_vpid_test(invalidate_tlb_new_vpid, 1);
 }
 
+extern char vmx_exception_test_skip_gp;
+extern char vmx_exception_test_skip_ud;
+extern char vmx_exception_test_skip_ud_from_l1;
+extern char vmx_exception_test_skip_de;
+extern char vmx_exception_test_skip_bp;
+
+static void set_exception_bitmap(u32 vector)
+{
+	vmcs_write(EXC_BITMAP, vmcs_read(EXC_BITMAP) | (1u << vector));
+}
+
+static void vmx_exception_handler_gp(struct ex_regs *regs)
+{
+	report(regs->vector == GP_VECTOR,
+	       "Handling #GP in L2's exception handler.");
+	regs->rip = (uintptr_t)(&vmx_exception_test_skip_gp);
+}
+
+static void vmx_exception_handler_ud(struct ex_regs *regs)
+{
+	report(regs->vector == UD_VECTOR,
+	       "Handling #UD in L2's exception handler.");
+	regs->rip = (uintptr_t)(&vmx_exception_test_skip_ud);
+}
+
+static void vmx_exception_handler_de(struct ex_regs *regs)
+{
+	report(regs->vector == DE_VECTOR,
+	       "Handling #DE in L2's exception handler.");
+	regs->rip = (uintptr_t)(&vmx_exception_test_skip_de);
+}
+
+static void vmx_exception_handler_db(struct ex_regs *regs)
+{
+	report(regs->vector == DB_VECTOR,
+	       "Handling #DB in L2's exception handler.");
+	regs->rflags &= ~X86_EFLAGS_TF;
+}
+
+static void vmx_exception_handler_bp(struct ex_regs *regs)
+{
+	report(regs->vector == BP_VECTOR,
+	       "Handling #BP in L2's exception handler.");
+	regs->rip = (uintptr_t)(&vmx_exception_test_skip_bp);
+}
+
+static uint64_t usermode_callback(void)
+{
+	/* Trigger an #AC by writing 8 bytes to a 4-byte aligned address. */
+	asm volatile(
+		"sub $0x10, %rsp\n\t"
+		"movq $0, 0x4(%rsp)\n\t"
+		"add $0x10, %rsp\n\t");
+
+	return 0;
+}
+
+static void vmx_exception_test_guest(void)
+{
+	handler old_gp = handle_exception(GP_VECTOR, vmx_exception_handler_gp);
+	handler old_ud = handle_exception(UD_VECTOR, vmx_exception_handler_ud);
+	handler old_de = handle_exception(DE_VECTOR, vmx_exception_handler_de);
+	handler old_db = handle_exception(DB_VECTOR, vmx_exception_handler_db);
+	handler old_bp = handle_exception(BP_VECTOR, vmx_exception_handler_bp);
+	bool raised_vector = false;
+	u64 old_cr0, old_rflags;
+
+	asm volatile (
+		/* Return to L1 before starting the tests. */
+		"vmcall\n\t"
+
+		/* #GP handled by L2 */
+		"mov %[val], %%cr0\n\t"
+		"vmx_exception_test_skip_gp:\n\t"
+		"vmcall\n\t"
+
+		/* #GP handled by L1 */
+		"mov %[val], %%cr0\n\t"
+
+		/* #UD handled by L2. */
+		"ud2\n\t"
+		"vmx_exception_test_skip_ud:\n\t"
+		"vmcall\n\t"
+
+		/* #UD handled by L1. */
+		"ud2\n\t"
+		"vmx_exception_test_skip_ud_from_l1:\n\t"
+
+		/* #DE handled by L2. */
+		"xor %%eax, %%eax\n\t"
+		"xor %%ebx, %%ebx\n\t"
+		"xor %%edx, %%edx\n\t"
+		"idiv %%ebx\n\t"
+		"vmx_exception_test_skip_de:\n\t"
+		"vmcall\n\t"
+
+		/* #DE handled by L1. */
+		"xor %%eax, %%eax\n\t"
+		"xor %%ebx, %%ebx\n\t"
+		"xor %%edx, %%edx\n\t"
+		"idiv %%ebx\n\t"
+
+		/* #DB handled by L2. */
+		"nop\n\t"
+		"vmcall\n\t"
+
+		/* #DB handled by L1. */
+		"nop\n\t"
+
+		/* #BP handled by L2. */
+		"int3\n\t"
+		"vmx_exception_test_skip_bp:\n\t"
+		"vmcall\n\t"
+
+		/* #BP handled by L1. */
+		"int3\n\t"
+		:: [val]"r"(1ul << 32) : "eax", "ebx", "edx");
+
+	/* #AC handled by L2. */
+	old_cr0  = read_cr0();
+	old_rflags = read_rflags();
+	write_cr0(old_cr0 | X86_CR0_AM);
+	write_rflags(old_rflags | X86_EFLAGS_AC);
+
+	run_in_user(usermode_callback, AC_VECTOR, 0, 0, 0, 0, &raised_vector);
+	report(raised_vector, "#AC vector raised from usermode in L2");
+	vmcall();
+
+	/* #AC handled by L1. */
+	run_in_user(usermode_callback, AC_VECTOR, 0, 0, 0, 0, &raised_vector);
+	report(!raised_vector,
+	       "#AC vector not raised from usermode in L2 (L1 handled it)");
+
+	write_cr0(old_cr0);
+	write_rflags(old_rflags);
+
+	/* Clean up */
+	handle_exception(GP_VECTOR, old_gp);
+	handle_exception(UD_VECTOR, old_ud);
+	handle_exception(DE_VECTOR, old_de);
+	handle_exception(DB_VECTOR, old_db);
+	handle_exception(BP_VECTOR, old_bp);
+}
+
+static void handle_exception_in_l2(const char *msg)
+{
+	enter_guest();
+	assert_exit_reason(VMX_VMCALL);
+	report(vmcs_read(EXI_REASON) == VMX_VMCALL, msg);
+	skip_exit_insn();
+}
+
+static void handle_exception_in_l1(u32 vector, const char *msg)
+{
+	set_exception_bitmap(vector);
+	enter_guest();
+	assert_exit_reason(VMX_EXC_NMI);
+	report((vmcs_read(EXI_REASON) == VMX_EXC_NMI) &&
+	       ((vmcs_read(EXI_INTR_INFO) & 0xff) == vector),
+	       msg);
+	skip_exit_insn();
+}
+
+static void vmx_exception_test(void)
+{
+	u32 old_eb = vmcs_read(EXC_BITMAP);
+
+	test_set_guest(vmx_exception_test_guest);
+
+	/*
+	 * Start L2 so it can register its exception handlers before the test
+	 * starts.
+	 */
+	enter_guest();
+	assert_exit_reason(VMX_VMCALL);
+	skip_exit_insn();
+
+	/* Run tests. */
+	handle_exception_in_l2("#GP handled by L2");
+	handle_exception_in_l1(GP_VECTOR, "#GP handled by L1");
+
+	handle_exception_in_l2("#UD handled by L2");
+	handle_exception_in_l1(UD_VECTOR, "#UD handled by L1");
+	vmcs_write(GUEST_RIP, (u64)&vmx_exception_test_skip_ud_from_l1);
+
+	handle_exception_in_l2("#DE handled by L2");
+	handle_exception_in_l1(DE_VECTOR, "#DE handled by L1");
+
+	enable_tf();
+	handle_exception_in_l2("#DB handled by L2");
+	report((vmcs_read(GUEST_RFLAGS) & X86_EFLAGS_TF) == 0,
+	       "X86_EFLAGS_TF disabled in L2");
+	enable_tf();
+	handle_exception_in_l1(DB_VECTOR, "#DB handled by L1");
+	disable_tf();
+
+	handle_exception_in_l2("#BP handled by L2");
+	handle_exception_in_l1(BP_VECTOR, "#BP handled by L1");
+
+	handle_exception_in_l2("#AC handled by L2");
+	handle_exception_in_l1(AC_VECTOR, "#AC handled by L1");
+
+	/* Complete running the guest. */
+	enter_guest();
+	assert_exit_reason(VMX_VMCALL);
+
+	vmcs_write(EXC_BITMAP, old_eb);
+}
+
 #define TEST(name) { #name, .v2 = name }
 
 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
@@ -10810,5 +11020,6 @@  struct vmx_test vmx_tests[] = {
 	TEST(vmx_pf_no_vpid_test),
 	TEST(vmx_pf_invvpid_test),
 	TEST(vmx_pf_vpid_test),
+	TEST(vmx_exception_test),
 	{ NULL, NULL, NULL, NULL, NULL, {0} },
 };