
[2/3,v3] nVMX: Test Selector and Base Address fields of Guest Segment Registers on vmentry of nested guests

Message ID 20200921081027.23047-3-krish.sadhukhan@oracle.com (mailing list archive)
State New, archived
Series kvm-unit-test: nVMX: Test Selector and Base Address fields of Guest Segment registers

Commit Message

Krish Sadhukhan Sept. 21, 2020, 8:10 a.m. UTC
According to section "Checks on Guest Segment Registers" in Intel SDM vol 3C,
the following checks are performed on the Guest Segment Registers on vmentry
of nested guests:

    Selector fields:
	— TR. The TI flag (bit 2) must be 0.
	— LDTR. If LDTR is usable, the TI flag (bit 2) must be 0.
	— SS. If the guest will not be virtual-8086 and the "unrestricted
	  guest" VM-execution control is 0, the RPL (bits 1:0) must equal
	  the RPL of the selector field for CS.

    Base-address fields:
	— CS, SS, DS, ES, FS, GS. If the guest will be virtual-8086, the
	  address must be the selector field shifted left 4 bits (multiplied
	  by 16).
	— The following checks are performed on processors that support Intel
	  64 architecture:
		TR, FS, GS. The address must be canonical.
		LDTR. If LDTR is usable, the address must be canonical.
		CS. Bits 63:32 of the address must be zero.
		SS, DS, ES. If the register is usable, bits 63:32 of the
		address must be zero.
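
As a minimal illustration, the first of these checks (TR's TI flag must be 0) can
be exercised with the existing kvm-unit-tests helpers roughly as follows; this is
only a sketch, relying on the vmcs_read()/vmcs_write() and test_guest_state()
helpers used by the patch below, and the function name is purely illustrative:

    /* Illustrative only: provoke a VM-entry failure via TR.TI. */
    static void sketch_tr_ti_check(void)
    {
    	u16 sel_saved = vmcs_read(GUEST_SEL_TR);

    	/* Setting the TI flag (bit 2) in the TR selector must fail VM-entry. */
    	vmcs_write(GUEST_SEL_TR, sel_saved | (1u << 2));
    	test_guest_state("TR.TI must be 0", true /* expect failure */,
    			 sel_saved | (1u << 2), "GUEST_SEL_TR");

    	/* Restore the original selector so later tests start clean. */
    	vmcs_write(GUEST_SEL_TR, sel_saved);
    }

The patch below wraps this pattern in the TEST_SEGMENT_SEL() and
TEST_SEGMENT_BASE_ADDR_*() macros so that each field/value combination becomes a
one-line check.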

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 lib/x86/processor.h |   1 +
 x86/vmx_tests.c     | 200 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 201 insertions(+)

Patch

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 74a2498..c2c487c 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -63,6 +63,7 @@ 
 #define X86_EFLAGS_OF    0x00000800
 #define X86_EFLAGS_IOPL  0x00003000
 #define X86_EFLAGS_NT    0x00004000
+#define X86_EFLAGS_VM    0x00020000
 #define X86_EFLAGS_AC    0x00040000
 
 #define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 22f0c7b..2b6e47b 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -7980,6 +7980,203 @@  static void test_load_guest_bndcfgs(void)
 	vmcs_write(GUEST_BNDCFGS, bndcfgs_saved);
 }
 
+#define	GUEST_SEG_UNUSABLE_MASK	(1u << 16)
+#define	GUEST_SEG_SEL_TI_MASK	(1u << 2)
+#define	TEST_SEGMENT_SEL(xfail, sel, sel_name, val)			\
+	vmcs_write(sel, val);						\
+	test_guest_state("Test Guest Segment Selector",	xfail, val,	\
+			 sel_name);
+
+/*
+ * The following checks are done on the Selector field of the Guest Segment
+ * Registers:
+ *    — TR. The TI flag (bit 2) must be 0.
+ *    — LDTR. If LDTR is usable, the TI flag (bit 2) must be 0.
+ *    — SS. If the guest will not be virtual-8086 and the "unrestricted
+ *	guest" VM-execution control is 0, the RPL (bits 1:0) must equal
+ *	the RPL of the selector field for CS.
+ *
+ *  [Intel SDM]
+ */
+static void test_guest_segment_sel_fields(void)
+{
+	u16 sel_saved;
+	u32 ar_saved;
+	u32 cpu_ctrl0_saved;
+	u32 cpu_ctrl1_saved;
+	u16 cs_rpl_bits;
+
+	/*
+	 * Test for GUEST_SEL_TR
+	 */
+	sel_saved = vmcs_read(GUEST_SEL_TR);
+	TEST_SEGMENT_SEL(true, GUEST_SEL_TR, "GUEST_SEL_TR",
+			 sel_saved | GUEST_SEG_SEL_TI_MASK);
+	vmcs_write(GUEST_SEL_TR, sel_saved);
+
+	/*
+	 * Test for GUEST_SEL_LDTR
+	 */
+	sel_saved = vmcs_read(GUEST_SEL_LDTR);
+	ar_saved = vmcs_read(GUEST_AR_LDTR);
+	/* LDTR is set unusable */
+	vmcs_write(GUEST_AR_LDTR, ar_saved | GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_SEL(false, GUEST_SEL_LDTR, "GUEST_SEL_LDTR",
+			 sel_saved | GUEST_SEG_SEL_TI_MASK);
+	TEST_SEGMENT_SEL(false, GUEST_SEL_LDTR, "GUEST_SEL_LDTR",
+			 sel_saved & ~GUEST_SEG_SEL_TI_MASK);
+	/* LDTR is set usable */
+	vmcs_write(GUEST_AR_LDTR, ar_saved & ~GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_SEL(true, GUEST_SEL_LDTR, "GUEST_SEL_LDTR",
+			 sel_saved | GUEST_SEG_SEL_TI_MASK);
+
+	TEST_SEGMENT_SEL(false, GUEST_SEL_LDTR, "GUEST_SEL_LDTR",
+			 sel_saved & ~GUEST_SEG_SEL_TI_MASK);
+
+	vmcs_write(GUEST_AR_LDTR, ar_saved);
+	vmcs_write(GUEST_SEL_LDTR, sel_saved);
+
+	/*
+	 * Test for GUEST_SEL_SS
+	 */
+	cpu_ctrl0_saved = vmcs_read(CPU_EXEC_CTRL0);
+	cpu_ctrl1_saved = vmcs_read(CPU_EXEC_CTRL1);
+	ar_saved = vmcs_read(GUEST_AR_SS);
+	/* Turn off "unrestricted guest" vm-execution control */
+	vmcs_write(CPU_EXEC_CTRL1, cpu_ctrl1_saved & ~CPU_URG);
+	cs_rpl_bits = vmcs_read(GUEST_SEL_CS) & 0x3;
+	sel_saved = vmcs_read(GUEST_SEL_SS);
+	TEST_SEGMENT_SEL(true, GUEST_SEL_SS, "GUEST_SEL_SS",
+				 ((sel_saved & ~0x3) | (~cs_rpl_bits & 0x3)));
+	TEST_SEGMENT_SEL(false, GUEST_SEL_SS, "GUEST_SEL_SS",
+				 ((sel_saved & ~0x3) | (cs_rpl_bits & 0x3)));
+	/* Make SS usable if it's unusable or vice-versa */
+	if (ar_saved & GUEST_SEG_UNUSABLE_MASK)
+		vmcs_write(GUEST_AR_SS, ar_saved & ~GUEST_SEG_UNUSABLE_MASK);
+	else
+		vmcs_write(GUEST_AR_SS, ar_saved | GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_SEL(true, GUEST_SEL_SS, "GUEST_SEL_SS",
+				 ((sel_saved & ~0x3) | (~cs_rpl_bits & 0x3)));
+	TEST_SEGMENT_SEL(false, GUEST_SEL_SS, "GUEST_SEL_SS",
+				 ((sel_saved & ~0x3) | (cs_rpl_bits & 0x3)));
+
+	/* Turn on "unrestricted guest" vm-execution control */
+	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrl0_saved | CPU_SECONDARY);
+	vmcs_write(CPU_EXEC_CTRL1, cpu_ctrl1_saved | CPU_URG);
+	/* EPT and EPTP must be setup when "unrestricted guest" is on */
+	setup_ept(false);
+	TEST_SEGMENT_SEL(false, GUEST_SEL_SS, "GUEST_SEL_SS",
+				 ((sel_saved & ~0x3) | (~cs_rpl_bits & 0x3)));
+	TEST_SEGMENT_SEL(false, GUEST_SEL_SS, "GUEST_SEL_SS",
+				 ((sel_saved & ~0x3) | (cs_rpl_bits & 0x3)));
+	/* Make SS usable if it's unusable or vice-versa */
+	if (vmcs_read(GUEST_AR_SS) & GUEST_SEG_UNUSABLE_MASK)
+		vmcs_write(GUEST_AR_SS, ar_saved & ~GUEST_SEG_UNUSABLE_MASK);
+	else
+		vmcs_write(GUEST_AR_SS, ar_saved | GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_SEL(false, GUEST_SEL_SS, "GUEST_SEL_SS",
+				 ((sel_saved & ~0x3) | (~cs_rpl_bits & 0x3)));
+	TEST_SEGMENT_SEL(false, GUEST_SEL_SS, "GUEST_SEL_SS",
+				 ((sel_saved & ~0x3) | (cs_rpl_bits & 0x3)));
+
+	vmcs_write(GUEST_AR_SS, ar_saved);
+	vmcs_write(GUEST_SEL_SS, sel_saved);
+	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrl0_saved);
+	vmcs_write(CPU_EXEC_CTRL1, cpu_ctrl1_saved);
+}
+
+#define	TEST_SEGMENT_BASE_ADDR_UPPER_BITS(xfail, seg_base, seg_base_name)\
+	addr_saved = vmcs_read(seg_base);				\
+	for (i = 32; i < 63; i = i + 4) {				\
+		addr = addr_saved | 1ull << i;				\
+		vmcs_write(seg_base, addr);				\
+		test_guest_state(seg_base_name,	xfail, addr,		\
+				seg_base_name);				\
+	}								\
+	vmcs_write(seg_base, addr_saved);
+
+#define	TEST_SEGMENT_BASE_ADDR_CANONICAL(xfail, seg_base, seg_base_name)\
+	addr_saved = vmcs_read(seg_base);				\
+	vmcs_write(seg_base, NONCANONICAL);				\
+	test_guest_state(seg_base_name,	xfail, NONCANONICAL,		\
+			seg_base_name);					\
+	vmcs_write(seg_base, addr_saved);
+
+/*
+ * The following checks are done on the Base Address field of the Guest
+ * Segment Registers on processors that support Intel 64 architecture:
+ *    - TR, FS, GS : The address must be canonical.
+ *    - LDTR : If LDTR is usable, the address must be canonical.
+ *    - CS : Bits 63:32 of the address must be zero.
+ *    - SS, DS, ES : If the register is usable, bits 63:32 of the address
+ *	must be zero.
+ *
+ *  [Intel SDM]
+ */
+static void test_guest_segment_base_addr_fields(void)
+{
+	u64 addr_saved;
+	u64 addr;
+	u32 ar_saved;
+	int i;
+
+	/*
+	 * The address of TR, FS, GS and LDTR must be canonical.
+	 */
+	TEST_SEGMENT_BASE_ADDR_CANONICAL(true, GUEST_BASE_TR, "GUEST_BASE_TR");
+	TEST_SEGMENT_BASE_ADDR_CANONICAL(true, GUEST_BASE_FS, "GUEST_BASE_FS");
+	TEST_SEGMENT_BASE_ADDR_CANONICAL(true, GUEST_BASE_GS, "GUEST_BASE_GS");
+	ar_saved = vmcs_read(GUEST_AR_LDTR);
+	/* Make LDTR unusable */
+	vmcs_write(GUEST_AR_LDTR, ar_saved | GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_BASE_ADDR_CANONICAL(false, GUEST_BASE_LDTR,
+					"GUEST_BASE_LDTR");
+	/* Make LDTR usable */
+	vmcs_write(GUEST_AR_LDTR, ar_saved & ~GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_BASE_ADDR_CANONICAL(true, GUEST_BASE_LDTR,
+					"GUEST_BASE_LDTR");
+
+	vmcs_write(GUEST_AR_LDTR, ar_saved);
+
+	/*
+	 * Bits 63:32 in CS, SS, DS and ES base address must be zero
+	 */
+	TEST_SEGMENT_BASE_ADDR_UPPER_BITS(true, GUEST_BASE_CS,
+					 "GUEST_BASE_CS");
+	ar_saved = vmcs_read(GUEST_AR_SS);
+	/* Make SS unusable */
+	vmcs_write(GUEST_AR_SS, ar_saved | GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_BASE_ADDR_UPPER_BITS(false, GUEST_BASE_SS,
+					 "GUEST_BASE_SS");
+	/* Make SS usable */
+	vmcs_write(GUEST_AR_SS, ar_saved & ~GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_BASE_ADDR_UPPER_BITS(true, GUEST_BASE_SS,
+					 "GUEST_BASE_SS");
+	vmcs_write(GUEST_AR_SS, ar_saved);
+
+	ar_saved = vmcs_read(GUEST_AR_DS);
+	/* Make DS unusable */
+	vmcs_write(GUEST_AR_DS, ar_saved | GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_BASE_ADDR_UPPER_BITS(false, GUEST_BASE_DS,
+					 "GUEST_BASE_DS");
+	/* Make DS usable */
+	vmcs_write(GUEST_AR_DS, ar_saved & ~GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_BASE_ADDR_UPPER_BITS(true, GUEST_BASE_DS,
+					 "GUEST_BASE_DS");
+	vmcs_write(GUEST_AR_DS, ar_saved);
+
+	ar_saved = vmcs_read(GUEST_AR_ES);
+	/* Make ES unusable */
+	vmcs_write(GUEST_AR_ES, ar_saved | GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_BASE_ADDR_UPPER_BITS(false, GUEST_BASE_ES,
+					 "GUEST_BASE_ES");
+	/* Make ES usable */
+	vmcs_write(GUEST_AR_ES, ar_saved & ~GUEST_SEG_UNUSABLE_MASK);
+	TEST_SEGMENT_BASE_ADDR_UPPER_BITS(true, GUEST_BASE_ES,
+					 "GUEST_BASE_ES");
+	vmcs_write(GUEST_AR_ES, ar_saved);
+}
+
 /*
  * Check that the virtual CPU checks the VMX Guest State Area as
  * documented in the Intel SDM.
@@ -8002,6 +8199,9 @@  static void vmx_guest_state_area_test(void)
 	test_load_guest_perf_global_ctrl();
 	test_load_guest_bndcfgs();
 
+	test_guest_segment_sel_fields();
+	test_guest_segment_base_addr_fields();
+
 	test_canonical(GUEST_BASE_GDTR, "GUEST_BASE_GDTR", false);
 	test_canonical(GUEST_BASE_IDTR, "GUEST_BASE_IDTR", false);