[kvm-unit-test,nVMX] : Check Host Control Registers on vmentry of L2 guests
diff mbox series

Message ID 20190214171214.3798-2-krish.sadhukhan@oracle.com
State New
Headers show
Series
  • [kvm-unit-test,nVMX] : Check Host Control Registers on vmentry of L2 guests
Related show

Commit Message

Krish Sadhukhan Feb. 14, 2019, 5:12 p.m. UTC
According to section "Checks on VMX Controls" in Intel SDM vol 3C, the
following checks are performed on vmentry of L2 guests:

    - The CR0 field must not set any bit to a value not supported in VMX
      operation.
    - The CR4 field must not set any bit to a value not supported in VMX
      operation.
    - On processors that support Intel 64 architecture, the CR3 field must
      be such that bits 63:52 and the bits in the range 51:32 beyond the
      processor's physical-address width are 0.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Liam Merwick <liam.merwick@oracle.com>
---
 lib/x86/processor.h |  1 +
 x86/vmx_tests.c     | 77 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 78 insertions(+)

Comments

Sean Christopherson Feb. 14, 2019, 6:18 p.m. UTC | #1
On Thu, Feb 14, 2019 at 12:12:14PM -0500, Krish Sadhukhan wrote:
> According to section "Checks on VMX Controls" in Intel SDM vol 3C, the
> following checks are performed on vmentry of L2 guests:
> 
>     - The CR0 field must not set any bit to a value not supported in VMX
>       operation.
>     - The CR4 field must not set any bit to a value not supported in VMX
>       operation.
>     - On processors that support Intel 64 architecture, the CR3 field must
>       be such that bits 63:52 and bits in the range 51:32 beyond the
>       processor’s physical-address width must be 0.
> 
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> Reviewed-by: Liam Merwick <liam.merwick@oracle.com>
> ---
>  lib/x86/processor.h |  1 +
>  x86/vmx_tests.c     | 77 +++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 78 insertions(+)
> 
> diff --git a/lib/x86/processor.h b/lib/x86/processor.h
> index 15237a5..916e67d 100644
> --- a/lib/x86/processor.h
> +++ b/lib/x86/processor.h
> @@ -40,6 +40,7 @@
>  #define X86_CR4_UMIP   0x00000800
>  #define X86_CR4_VMXE   0x00002000
>  #define X86_CR4_PCIDE  0x00020000
> +#define X86_CR4_SMEP   0x00100000
>  #define X86_CR4_SMAP   0x00200000
>  #define X86_CR4_PKE    0x00400000
>  
> diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
> index 66a87f6..7af0430 100644
> --- a/x86/vmx_tests.c
> +++ b/x86/vmx_tests.c
> @@ -4995,6 +4995,81 @@ static void test_sysenter_field(u32 field, const char *name)
>  	vmcs_write(field, addr_saved);
>  }
>  
> +static void test_ctl_reg(const char *cr_name, u64 cr, u64 fixed0, u64 fixed1)
> +{
> +	u64 val;
> +	u64 cr_saved = vmcs_read(cr);
> +
> +	val = fixed0 & fixed1;
> +	if (cr == HOST_CR4)
> +		val |= X86_CR4_PAE;
> +	vmcs_write(cr, val);
> +	report_prefix_pushf("%s %lx", cr_name, val);
> +	test_vmx_vmlaunch(0, false);
> +	report_prefix_pop();
> +
> +	val = fixed0 | fixed1;
> +	vmcs_write(cr, val);
> +	report_prefix_pushf("%s %lx", cr_name, val);
> +	test_vmx_vmlaunch(0, false);
> +	report_prefix_pop();
> +
> +	val = fixed0 ^ fixed1;

Technically this isn't guaranteed to work, although practically speaking
it will always hold true, i.e. a processor could theoretically allow all
bits to be flexible.

It's also not a very effective test since it doesn't verify individual
bits, e.g. it only tests that KVM detects an incorrect setting for *any*
fixed bit.

A more robust and effective approach would be to iterate over each
fixed bit to verify setting it incorrectly fails as expected.  The same
can be done for the flexible bits, e.g. test that each bit can be set
or cleared.  This might also make it cleaner to handle PAE/SMAP/SMEP,
e.g. explicitly skip testing them with a comment saying we need to
preserve the host paging context.

> +	vmcs_write(cr, val);
> +	report_prefix_pushf("%s %lx", cr_name, val);
> +	test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD, false);
> +	report_prefix_pop();
> +
> +	val = fixed0 & (fixed1 << 1);

Similar comment to above, this isn't guaranteed to work.

> +	vmcs_write(cr, val);
> +	report_prefix_pushf("%s %lx", cr_name, val);
> +	test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD, false);
> +	report_prefix_pop();
> +
> +	vmcs_write(cr, cr_saved);
> +}
> +
> +/*
> + * 1. The CR0 field must not set any bit to a value not supported in VMX
> + *    operation.
> + * 2. The CR4 field must not set any bit to a value not supported in VMX
> + *    operation.
> + * 3. On processors that support Intel 64 architecture, the CR3 field must
> + *    be such that bits 63:52 and bits in the range 51:32 beyond the
> + *    processor’s physical-address width must be 0.
> + *
> + *  [Intel SDM]
> + */
> +static void test_host_ctl_regs(void)
> +{
> +	u64 fixed0, fixed1, cr3, cr3_saved;
> +	int i;
> +
> +	/* Test CR0 */
> +	fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0);
> +	fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1);
> +	test_ctl_reg("HOST_CR0", HOST_CR0, fixed0, fixed1);
> +
> +	/* Test CR4 */
> +	fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
> +	fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1) &
> +		 ~(X86_CR4_SMEP | X86_CR4_SMAP);
> +	test_ctl_reg("HOST_CR4", HOST_CR4, fixed0, fixed1);
> +
> +	/* Test CR3 */
> +	cr3_saved = vmcs_read(HOST_CR3);
> +	for (i = cpuid_maxphyaddr(); i < 64; i++) {
> +		cr3 = cr3_saved | (1ul << i);
> +		vmcs_write(HOST_CR3, cr3);
> +		report_prefix_pushf("HOST_CR3 %lx", cr3);
> +		test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD,
> +				  false);
> +		report_prefix_pop();
> +	}
> +
> +	vmcs_write(HOST_CR3, cr3_saved);
> +}
> +
>  /*
>   * Check that the virtual CPU checks the VMX Host State Area as
>   * documented in the Intel SDM.
> @@ -5008,6 +5083,8 @@ static void vmx_host_state_area_test(void)
>  	 */
>  	vmcs_write(GUEST_RFLAGS, 0);
>  
> +	test_host_ctl_regs();
> +
>  	test_sysenter_field(HOST_SYSENTER_ESP, "HOST_SYSENTER_ESP");
>  	test_sysenter_field(HOST_SYSENTER_EIP, "HOST_SYSENTER_EIP");
>  }
> -- 
> 2.17.2
>

Patch
diff mbox series

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 15237a5..916e67d 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -40,6 +40,7 @@ 
 #define X86_CR4_UMIP   0x00000800
 #define X86_CR4_VMXE   0x00002000
 #define X86_CR4_PCIDE  0x00020000
+#define X86_CR4_SMEP   0x00100000
 #define X86_CR4_SMAP   0x00200000
 #define X86_CR4_PKE    0x00400000
 
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 66a87f6..7af0430 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -4995,6 +4995,81 @@  static void test_sysenter_field(u32 field, const char *name)
 	vmcs_write(field, addr_saved);
 }
 
+static void test_ctl_reg(const char *cr_name, u64 cr, u64 fixed0, u64 fixed1)
+{
+	u64 val;
+	u64 cr_saved = vmcs_read(cr);
+
+	val = fixed0 & fixed1;
+	if (cr == HOST_CR4)
+		val |= X86_CR4_PAE;
+	vmcs_write(cr, val);
+	report_prefix_pushf("%s %lx", cr_name, val);
+	test_vmx_vmlaunch(0, false);
+	report_prefix_pop();
+
+	val = fixed0 | fixed1;
+	vmcs_write(cr, val);
+	report_prefix_pushf("%s %lx", cr_name, val);
+	test_vmx_vmlaunch(0, false);
+	report_prefix_pop();
+
+	val = fixed0 ^ fixed1;
+	vmcs_write(cr, val);
+	report_prefix_pushf("%s %lx", cr_name, val);
+	test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD, false);
+	report_prefix_pop();
+
+	val = fixed0 & (fixed1 << 1);
+	vmcs_write(cr, val);
+	report_prefix_pushf("%s %lx", cr_name, val);
+	test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD, false);
+	report_prefix_pop();
+
+	vmcs_write(cr, cr_saved);
+}
+
+/*
+ * 1. The CR0 field must not set any bit to a value not supported in VMX
+ *    operation.
+ * 2. The CR4 field must not set any bit to a value not supported in VMX
+ *    operation.
+ * 3. On processors that support Intel 64 architecture, the CR3 field must
+ *    be such that bits 63:52 and bits in the range 51:32 beyond the
+ *    processor’s physical-address width must be 0.
+ *
+ *  [Intel SDM]
+ */
+static void test_host_ctl_regs(void)
+{
+	u64 fixed0, fixed1, cr3, cr3_saved;
+	int i;
+
+	/* Test CR0 */
+	fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0);
+	fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1);
+	test_ctl_reg("HOST_CR0", HOST_CR0, fixed0, fixed1);
+
+	/* Test CR4 */
+	fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
+	fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1) &
+		 ~(X86_CR4_SMEP | X86_CR4_SMAP);
+	test_ctl_reg("HOST_CR4", HOST_CR4, fixed0, fixed1);
+
+	/* Test CR3 */
+	cr3_saved = vmcs_read(HOST_CR3);
+	for (i = cpuid_maxphyaddr(); i < 64; i++) {
+		cr3 = cr3_saved | (1ul << i);
+		vmcs_write(HOST_CR3, cr3);
+		report_prefix_pushf("HOST_CR3 %lx", cr3);
+		test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD,
+				  false);
+		report_prefix_pop();
+	}
+
+	vmcs_write(HOST_CR3, cr3_saved);
+}
+
 /*
  * Check that the virtual CPU checks the VMX Host State Area as
  * documented in the Intel SDM.
@@ -5008,6 +5083,8 @@  static void vmx_host_state_area_test(void)
 	 */
 	vmcs_write(GUEST_RFLAGS, 0);
 
+	test_host_ctl_regs();
+
 	test_sysenter_field(HOST_SYSENTER_ESP, "HOST_SYSENTER_ESP");
 	test_sysenter_field(HOST_SYSENTER_EIP, "HOST_SYSENTER_EIP");
 }