[1/2] nVMX x86: Check EPTP on vmentry of L2 guests

Message ID 20180918191317.25250-2-krish.sadhukhan@oracle.com
State New, archived

Commit Message

Krish Sadhukhan Sept. 18, 2018, 7:13 p.m. UTC
According to section "Checks on VMX Controls" in Intel SDM vol 3C, the
following check needs to be enforced on vmentry of L2 guests:

    If the “enable EPT” VM-execution control is 1, the EPTP VM-execution
    control field must satisfy the following checks:

      — The EPT memory type (bits 2:0) must be a value supported by the
        processor as indicated in the IA32_VMX_EPT_VPID_CAP MSR.
      — Bits 5:3 (1 less than the EPT page-walk length) must be 3,
        indicating an EPT page-walk length of 4.
      — Bit 6 (enable bit for accessed and dirty flags for EPT) must be
        0 if bit 21 of the IA32_VMX_EPT_VPID_CAP MSR is read as 0, indicating
        that the processor does not support accessed and dirty flags for EPT.
      — Reserved bits 11:7 and 63:N (where N is the processor’s
        physical-address width) must all be 0.

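A minimal stand-alone sketch of how these four checks translate to code
(hypothetical helper and userspace types, not the actual KVM code; the
IA32_VMX_EPT_VPID_CAP bit positions assumed are bit 8 for UC support,
bit 14 for WB support and bit 21 for A/D support):

	#include <stdbool.h>
	#include <stdint.h>

	static bool eptp_is_valid(uint64_t eptp, uint64_t ept_vpid_cap,
				  unsigned int maxphyaddr)
	{
		uint64_t mt = eptp & 0x7;	/* bits 2:0: EPT memory type */

		/* The memory type must be reported as supported: UC (0) or WB (6). */
		if (!(mt == 0 && (ept_vpid_cap & (1ull << 8))) &&
		    !(mt == 6 && (ept_vpid_cap & (1ull << 14))))
			return false;

		/* Bits 5:3 must be 3, i.e. an EPT page-walk length of 4. */
		if (((eptp >> 3) & 0x7) != 3)
			return false;

		/* Bit 6 (A/D enable) is valid only if capability bit 21 is set. */
		if ((eptp & (1ull << 6)) && !(ept_vpid_cap & (1ull << 21)))
			return false;

		/* Reserved bits 11:7 and 63:N must be zero. */
		if ((eptp >> 7) & 0x1f)
			return false;
		if (eptp >> maxphyaddr)
			return false;

		return true;
	}
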
Suggested-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
---
 arch/x86/include/asm/vmx.h | 4 +++-
 arch/x86/kvm/vmx.c         | 9 ++++++---
 2 files changed, 9 insertions(+), 4 deletions(-)

Comments

Sean Christopherson Sept. 19, 2018, 2:56 p.m. UTC | #1
On Tue, 2018-09-18 at 15:13 -0400, Krish Sadhukhan wrote:
> According to section "Checks on VMX Controls" in Intel SDM vol 3C, the
> following check needs to be enforced on vmentry of L2 guests:
> 
>     If the “enable EPT” VM-execution control is 1, the EPTP VM-execution
>     control field must satisfy the following checks:
> 
>       — The EPT memory type (bits 2:0) must be a value supported by the
> 	processor as indicated in the IA32_VMX_EPT_VPID_CAP MSR.
>       — Bits 5:3 (1 less than the EPT page-walk length) must be 3,
>         indicating an EPT page-walk length of 4.
>       — Bit 6 (enable bit for accessed and dirty flags for EPT) must be
>         0 if bit 21 of the IA32_VMX_EPT_VPID_CAP MSR is read as 0, indicating
>         that the processor does not support accessed and dirty flags for EPT.
>       — Reserved bits 11:7 and 63:N (where N is the processor’s
> 	physical-address width) must all be 0.

Unless I'm missing something, the only functional change in this patch
is to move the EPTP consistency check to check_vmentry_prereqs().  A
few comments:

  1. I submitted a patch for that bug a few months back[1], though it
     hasn't been applied yet.  My version also removed the return value
     from nested_ept_init_mmu_context() as it no longer needs to return
     a pass/fail int since the EPTP check was its only fail condition.

  2. The changelog should reflect the change you're making to the code.
     Regurgitating the SDM isn't helpful in this case because the issue
     isn't that we missed a check, simply that we didn't do the check
     early enough.

  3. Introducing the #defines for the reserved bits is a good change,
     but it should be done in a separate patch.  In the unlikely event
     that such refactoring introduced a bug, only that patch would
     need to be reverted, i.e. the bug wouldn't be collateral damage of
     the revert.

[1] https://patchwork.kernel.org/patch/10550953/
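
To make point 2 concrete, here is a simplified sketch of the ordering at
play (hypothetical function body; the real nested_vmx_run() does much
more than this):

	/* Validating the EPTP in check_vmentry_prereqs() means a bad value
	 * surfaces to L1 as a failed VMLAUNCH/VMRESUME before any vmcs12
	 * state has been committed. */
	static int nested_vmx_run_sketch(struct kvm_vcpu *vcpu, bool launch)
	{
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		int ret = check_vmentry_prereqs(vcpu, vmcs12);

		if (ret) {
			/* e.g. VMXERR_ENTRY_INVALID_CONTROL_FIELD */
			nested_vmx_failValid(vcpu, ret);
			return kvm_skip_emulated_instruction(vcpu);
		}

		/* Only past this point is guest state loaded and the nested
		 * EPT MMU context set up (nested_ept_init_mmu_context()). */
		return 1;	/* remainder of VM-entry elided */
	}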

> Suggested-by: Liran Alon <liran.alon@oracle.com>
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
> ---
>  arch/x86/include/asm/vmx.h | 4 +++-
>  arch/x86/kvm/vmx.c         | 9 ++++++---
>  2 files changed, 9 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
> index 6aa8499..4e44c58 100644
> --- a/arch/x86/include/asm/vmx.h
> +++ b/arch/x86/include/asm/vmx.h
> @@ -465,7 +465,7 @@ enum vmcs_field {
>  #define VMX_EPT_2MB_PAGE_BIT			(1ull << 16)
>  #define VMX_EPT_1GB_PAGE_BIT			(1ull << 17)
>  #define VMX_EPT_INVEPT_BIT			(1ull << 20)
> -#define VMX_EPT_AD_BIT				    (1ull << 21)
> +#define VMX_EPT_AD_BIT				(1ull << 21)
>  #define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
>  #define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
>  
> @@ -493,6 +493,8 @@ enum vmcs_field {
>  						 VMX_EPT_WRITABLE_MASK |       \
>  						 VMX_EPT_EXECUTABLE_MASK)
>  #define VMX_EPT_MT_MASK				(7ull << VMX_EPT_MT_EPTE_SHIFT)
> +#define VMX_EPTP_RESV_BITS_SHIFT		7
> +#define VMX_EPTP_RESV_BITS_MASK			0x1full

Masks of this nature usually incorporate the shift in the mask
definition, e.g. %VMX_EPT_MT_MASK.  "RESERVED" is generally
shortened to "RSVD".  And I think we can drop "_MASK", e.g. we'd
end up with VMX_EPTP_RSVD_BITS and a usage like:

	if (address >> maxphyaddr || address & VMX_EPTP_RSVD_BITS)
		return false;
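
For instance, the suggested definition itself might look like this
(a sketch, assuming the kernel's GENMASK_ULL() helper from <linux/bits.h>):

	/* EPTP bits 11:7 are reserved; the mask incorporates the shift. */
	#define VMX_EPTP_RSVD_BITS		GENMASK_ULL(11, 7)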

>  /* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */
>  #define VMX_EPT_MISCONFIG_WX_VALUE		(VMX_EPT_WRITABLE_MASK |       \
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 5d8e317..0df8273 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -8708,7 +8708,8 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
>  		return false;
>  
>  	/* Reserved bits should not be set */
> -	if (address >> maxphyaddr || ((address >> 7) & 0x1f))
> +	if (address >> maxphyaddr ||
> +	    ((address >> VMX_EPTP_RESV_BITS_SHIFT) & VMX_EPTP_RESV_BITS_MASK))
>  		return false;
>  
>  	/* AD, if set, should be supported */
> @@ -10602,8 +10603,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
>  static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
>  {
>  	WARN_ON(mmu_is_nested(vcpu));
> -	if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
> -		return 1;
>  
>  	kvm_mmu_unload(vcpu);
>  	kvm_init_shadow_ept_mmu(vcpu,
> @@ -11645,6 +11644,10 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  		}
>  	}
>  
> +	if (nested_cpu_has_ept(vmcs12) &&
> +	    !valid_ept_address(vcpu, vmcs12->ept_pointer))
> +		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +
>  	if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
>  		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
>
Krish Sadhukhan Sept. 19, 2018, 11:39 p.m. UTC | #2
On 09/19/2018 07:56 AM, Sean Christopherson wrote:
> On Tue, 2018-09-18 at 15:13 -0400, Krish Sadhukhan wrote:
>> According to section "Checks on VMX Controls" in Intel SDM vol 3C, the
>> following check needs to be enforced on vmentry of L2 guests:
>>
>>      If the “enable EPT” VM-execution control is 1, the EPTP VM-execution
>>      control field must satisfy the following checks:
>>
>>        — The EPT memory type (bits 2:0) must be a value supported by the
>> 	processor as indicated in the IA32_VMX_EPT_VPID_CAP MSR.
>>        — Bits 5:3 (1 less than the EPT page-walk length) must be 3,
>>          indicating an EPT page-walk length of 4.
>>        — Bit 6 (enable bit for accessed and dirty flags for EPT) must be
>>          0 if bit 21 of the IA32_VMX_EPT_VPID_CAP MSR is read as 0, indicating
>>          that the processor does not support accessed and dirty flags for EPT.
>>        — Reserved bits 11:7 and 63:N (where N is the processor’s
>> 	physical-address width) must all be 0.
> Unless I'm missing something, the only functional change in this patch
> is to move the EPTP consistency check to check_vmentry_prereqs().  A
> few comments:
>
>    1. I submitted a patch for that bug a few months back[1], though it
>       hasn't been applied yet.  My version also removed the return value
>       from nested_ept_init_mmu_context() as it no longer needs to return
>       a pass/fail int since the EPTP check was its only fail condition.

Sorry, I had missed your patch!

>
>    2. The changelog should reflect the change you're making to the code.
>       Regurgitating the SDM isn't helpful in this case because the issue
>       isn't that we missed a check, simply that we didn't do the check
>       early enough.
>
>    3. Introducing the #defines for the reserved bits is a good change,
>       but it should be done in a separate patch.  In the unlikely event
>       that such refactoring introduced a bug, only that patch would
>       need to be reverted, i.e. the bug wouldn't be collateral damage of
>       the revert.

I will send out a separate patch for the #defines after your patch is 
applied.

>
> [1] https://patchwork.kernel.org/patch/10550953/
>
>> Suggested-by: Liran Alon <liran.alon@oracle.com>
>> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
>> Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
>> ---
>>   arch/x86/include/asm/vmx.h | 4 +++-
>>   arch/x86/kvm/vmx.c         | 9 ++++++---
>>   2 files changed, 9 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
>> index 6aa8499..4e44c58 100644
>> --- a/arch/x86/include/asm/vmx.h
>> +++ b/arch/x86/include/asm/vmx.h
>> @@ -465,7 +465,7 @@ enum vmcs_field {
>>   #define VMX_EPT_2MB_PAGE_BIT			(1ull << 16)
>>   #define VMX_EPT_1GB_PAGE_BIT			(1ull << 17)
>>   #define VMX_EPT_INVEPT_BIT			(1ull << 20)
>> -#define VMX_EPT_AD_BIT				    (1ull << 21)
>> +#define VMX_EPT_AD_BIT				(1ull << 21)
>>   #define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
>>   #define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
>>   
>> @@ -493,6 +493,8 @@ enum vmcs_field {
>>   						 VMX_EPT_WRITABLE_MASK |       \
>>   						 VMX_EPT_EXECUTABLE_MASK)
>>   #define VMX_EPT_MT_MASK				(7ull << VMX_EPT_MT_EPTE_SHIFT)
>> +#define VMX_EPTP_RESV_BITS_SHIFT		7
>> +#define VMX_EPTP_RESV_BITS_MASK			0x1full
> Masks of this nature usually incorporate the shift in the mask
> definition, e.g. %VMX_EPT_MT_MASK.  "RESERVED" is generally
> shortened to "RSVD".  And I think we can drop "_MASK", e.g. we'd
> end up with VMX_EPTP_RSVD_BITS and a usage like:
>
> 	if (address >> maxphyaddr || address & VMX_EPTP_RSVD_BITS)
> 		return false;
>
>>   /* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */
>>   #define VMX_EPT_MISCONFIG_WX_VALUE		(VMX_EPT_WRITABLE_MASK |       \
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 5d8e317..0df8273 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -8708,7 +8708,8 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
>>   		return false;
>>   
>>   	/* Reserved bits should not be set */
>> -	if (address >> maxphyaddr || ((address >> 7) & 0x1f))
>> +	if (address >> maxphyaddr ||
>> +	    ((address >> VMX_EPTP_RESV_BITS_SHIFT) & VMX_EPTP_RESV_BITS_MASK))
>>   		return false;
>>   
>>   	/* AD, if set, should be supported */
>> @@ -10602,8 +10603,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
>>   static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
>>   {
>>   	WARN_ON(mmu_is_nested(vcpu));
>> -	if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
>> -		return 1;
>>   
>>   	kvm_mmu_unload(vcpu);
>>   	kvm_init_shadow_ept_mmu(vcpu,
>> @@ -11645,6 +11644,10 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>>   		}
>>   	}
>>   
>> +	if (nested_cpu_has_ept(vmcs12) &&
>> +	    !valid_ept_address(vcpu, vmcs12->ept_pointer))
>> +		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
>> +
>>   	if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
>>   		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
>>

Patch

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 6aa8499..4e44c58 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -465,7 +465,7 @@ enum vmcs_field {
 #define VMX_EPT_2MB_PAGE_BIT			(1ull << 16)
 #define VMX_EPT_1GB_PAGE_BIT			(1ull << 17)
 #define VMX_EPT_INVEPT_BIT			(1ull << 20)
-#define VMX_EPT_AD_BIT				    (1ull << 21)
+#define VMX_EPT_AD_BIT				(1ull << 21)
 #define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
 #define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
 
@@ -493,6 +493,8 @@ enum vmcs_field {
 						 VMX_EPT_WRITABLE_MASK |       \
 						 VMX_EPT_EXECUTABLE_MASK)
 #define VMX_EPT_MT_MASK				(7ull << VMX_EPT_MT_EPTE_SHIFT)
+#define VMX_EPTP_RESV_BITS_SHIFT		7
+#define VMX_EPTP_RESV_BITS_MASK			0x1full
 
 /* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */
 #define VMX_EPT_MISCONFIG_WX_VALUE		(VMX_EPT_WRITABLE_MASK |       \
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5d8e317..0df8273 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8708,7 +8708,8 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 		return false;
 
 	/* Reserved bits should not be set */
-	if (address >> maxphyaddr || ((address >> 7) & 0x1f))
+	if (address >> maxphyaddr ||
+	    ((address >> VMX_EPTP_RESV_BITS_SHIFT) & VMX_EPTP_RESV_BITS_MASK))
 		return false;
 
 	/* AD, if set, should be supported */
@@ -10602,8 +10603,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
 static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 {
 	WARN_ON(mmu_is_nested(vcpu));
-	if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
-		return 1;
 
 	kvm_mmu_unload(vcpu);
 	kvm_init_shadow_ept_mmu(vcpu,
@@ -11645,6 +11644,10 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		}
 	}
 
+	if (nested_cpu_has_ept(vmcs12) &&
+	    !valid_ept_address(vcpu, vmcs12->ept_pointer))
+		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
 	if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;