[2/2,kvm-unit-test,nVMX] : Check "load IA32_PAT" VM-entry control on vmentry of nested guests

Message ID 20190418213941.20977-3-krish.sadhukhan@oracle.com
Series [1/2,kvm-unit-test,nVMX] : Move the functionality of enter_guest() to __enter_guest() and make the former a wrapper of the latter

Commit Message

Krish Sadhukhan April 18, 2019, 9:39 p.m. UTC
..to verify that KVM performs the appropriate consistency checks when
loading IA32_PAT as part of running a nested guest.

According to section "Checking and Loading Guest State" in Intel SDM
vol 3C, the following check is performed on vmentry:

    If the "load IA32_PAT" VM-entry control is 1, the value of the field
    for the IA32_PAT MSR must be one that could be written by WRMSR
    without fault at CPL 0. Specifically, each of the 8 bytes in the
    field must have one of the values 0 (UC), 1 (WC), 4 (WT), 5 (WP),
    6 (WB), or 7 (UC-).
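
For reference, the check boils down to validating each of the 8 bytes of
the 64-bit PAT value against that set. A minimal sketch of the per-byte
check (not part of this patch; the helper name is hypothetical and the
u64/u8 typedefs are assumed from the test suite's headers):

    static bool pat_value_is_valid(u64 pat)
    {
            int i;

            for (i = 0; i < 8; i++) {
                    u8 type = (pat >> (i * 8)) & 0xff;

                    /*
                     * Valid types: 0 (UC), 1 (WC), 4 (WT), 5 (WP),
                     * 6 (WB), 7 (UC-); 2, 3 and anything above 7 are
                     * reserved encodings.
                     */
                    if (type == 2 || type == 3 || type > 7)
                            return false;
            }

            return true;
    }

A guest PAT value that fails this check must cause VM-entry to fail with
"VM-entry failure due to invalid guest state", which the test below
checks for via VMX_FAIL_STATE | VMX_ENTRY_FAILURE.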

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Karl Heubaum <karl.heubaum@oracle.com>
---
 x86/vmx_tests.c | 140 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 121 insertions(+), 19 deletions(-)

Patch

diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 04b1aee..8186b4f 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -4995,10 +4995,37 @@  static void test_sysenter_field(u32 field, const char *name)
 	vmcs_write(field, addr_saved);
 }
 
+static void guest_pat_main(void)
+{
+	while (1) {
+		if (vmx_get_test_stage() != 2)
+			vmcall();
+		else
+			break;
+	}
+
+	asm volatile("fnop");
+}
+
+static void report_guest_pat_test(const char *test, u32 xreason, u64 guest_pat)
+{
+	u32 reason = vmcs_read(EXI_REASON);
+	u64 guest_rip;
+	u32 insn_len;
+
+	report("%s, GUEST_PAT %lx", reason == xreason, test, guest_pat);
+
+	guest_rip = vmcs_read(GUEST_RIP);
+	insn_len = vmcs_read(EXI_INST_LEN);
+	if (!(reason & (VMX_ENTRY_FAILURE | VMX_FAIL_STATE)))
+		vmcs_write(GUEST_RIP, guest_rip + insn_len);
+}
+
 /*
- * Since a PAT value higher than 8 will yield the same test result as that
- * of 8, we want to confine our tests only up to 8 in order to reduce
- * redundancy of tests and to avoid too many vmentries.
+ * PAT values higher than 8 are invalid for the same reason as 8: any
+ * byte value above 7 is a reserved encoding. Above 8 we therefore test
+ * only one value per bit position, to reduce the number of VM-entries
+ * and keep the runtime reasonable.
  */
 #define	PAT_VAL_LIMIT	8
 
@@ -5010,34 +5037,77 @@  static void test_pat(u32 fld, const char * fld_name, u32 ctrl_fld, u64 ctrl_bit)
 	u32 j;
 	int error;
 
-	vmcs_write(ctrl_fld, ctrl_saved & ~ctrl_bit);
-	for (i = 0; i <= PAT_VAL_LIMIT; i++) {
+	vmcs_clear_bits(ctrl_fld, ctrl_bit);
+	if (fld == GUEST_PAT) {
+		vmx_set_test_stage(1);
+		test_set_guest(guest_pat_main);
+	}
+
+	for (i = 0; i < 256; i = (i < PAT_VAL_LIMIT) ? i + 1 : i * 2) {
 		/* Test PAT0..PAT7 fields */
-		for (j = 0; j < 8; j++) {
+		for (j = 0; j < (i ? 8 : 1); j++) {
 			val = i << j * 8;
 			vmcs_write(fld, val);
-			report_prefix_pushf("%s %lx", fld_name, val);
-			test_vmx_vmlaunch(0, false);
-			report_prefix_pop();
+			if (fld == HOST_PAT) {
+				report_prefix_pushf("%s %lx", fld_name, val);
+				test_vmx_vmlaunch(0, false);
+				report_prefix_pop();
+
+			} else {	// GUEST_PAT
+				__enter_guest(ABORT_ON_EARLY_VMENTRY_FAIL);
+				report_guest_pat_test("ENT_LOAD_PAT enabled",
+						       VMX_VMCALL, val);
+			}
 		}
 	}
 
-	vmcs_write(ctrl_fld, ctrl_saved | ctrl_bit);
-	for (i = 0; i <= PAT_VAL_LIMIT; i++) {
+	vmcs_set_bits(ctrl_fld, ctrl_bit);
+	for (i = 0; i < 256; i = (i < PAT_VAL_LIMIT) ? i + 1 : i * 2) {
 		/* Test PAT0..PAT7 fields */
-		for (j = 0; j < 8; j++) {
+		for (j = 0; j < (i ? 8 : 1); j++) {
 			val = i << j * 8;
 			vmcs_write(fld, val);
-			report_prefix_pushf("%s %lx", fld_name, val);
-			if (i == 0x2 || i == 0x3 || i >= 0x8)
-				error = VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
-			else
-				error = 0;
-			test_vmx_vmlaunch(error, false);
-			report_prefix_pop();
+
+			if (fld == HOST_PAT) {
+				report_prefix_pushf("%s %lx", fld_name, val);
+				if (i == 0x2 || i == 0x3 || i >= 0x8)
+					error =
+					VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+				else
+					error = 0;
+
+				test_vmx_vmlaunch(error, false);
+				report_prefix_pop();
+
+			} else {	// GUEST_PAT
+				if (i == 0x2 || i == 0x3 || i >= 0x8) {
+					__enter_guest(ABORT_ON_EARLY_VMENTRY_FAIL);
+					report_guest_pat_test("ENT_LOAD_PAT "
+								"enabled",
+							     VMX_FAIL_STATE |
+							     VMX_ENTRY_FAILURE,
+							     val);
+				} else {
+					__enter_guest(ABORT_ON_EARLY_VMENTRY_FAIL);
+					report_guest_pat_test("ENT_LOAD_PAT "
+							      "enabled",
+							      VMX_VMCALL,
+							      val);
+				}
+			}
+
 		}
 	}
 
+	if (fld == GUEST_PAT) {
+		/*
+		 * Let the guest finish execution
+		 */
+		vmx_set_test_stage(2);
+		vmcs_write(fld, pat_saved);
+		__enter_guest(ABORT_ON_EARLY_VMENTRY_FAIL);
+	}
+
 	vmcs_write(ctrl_fld, ctrl_saved);
 	vmcs_write(fld, pat_saved);
 }
@@ -5083,6 +5153,37 @@  static void vmx_host_state_area_test(void)
 	test_load_host_pat();
 }
 
+/*
+ *  If the "load IA32_PAT" VM-entry control is 1, the value of the field
+ *  for the IA32_PAT MSR must be one that could be written by WRMSR
+ *  without fault at CPL 0. Specifically, each of the 8 bytes in the
+ *  field must have one of the values 0 (UC), 1 (WC), 4 (WT), 5 (WP),
+ *  6 (WB), or 7 (UC-).
+ *
+ *  [Intel SDM]
+ */
+static void test_load_guest_pat(void)
+{
+	/*
+	 * "load IA32_PAT" VM-entry control
+	 */
+	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
+		printf("\"Load-IA32-PAT\" entry control not supported\n");
+		return;
+	}
+
+	test_pat(GUEST_PAT, "GUEST_PAT", ENT_CONTROLS, ENT_LOAD_PAT);
+}
+
+/*
+ * Check that the virtual CPU checks the VMX Guest State Area as
+ * documented in the Intel SDM.
+ */
+static void vmx_guest_state_area_test(void)
+{
+	test_load_guest_pat();
+}
+
 static bool valid_vmcs_for_vmentry(void)
 {
 	struct vmcs *current_vmcs = NULL;
@@ -6505,6 +6606,7 @@  struct vmx_test vmx_tests[] = {
 	/* VM-entry tests */
 	TEST(vmx_controls_test),
 	TEST(vmx_host_state_area_test),
+	TEST(vmx_guest_state_area_test),
 	TEST(vmentry_movss_shadow_test),
 	/* APICv tests */
 	TEST(vmx_eoi_bitmap_ioapic_scan_test),