[5/5] KVM: nVMX: Skip Guest State Area vmentry checks that are necessary only if VMCS12 is dirty
diff mbox series

Message ID 20190707071147.11651-6-krish.sadhukhan@oracle.com
State New
Headers show
Series
  • KVM: nVMX: Skip vmentry checks that are necessary only if VMCS12 is dirty
Related show

Commit Message

Krish Sadhukhan July 7, 2019, 7:11 a.m. UTC
...so that every nested vmentry is not slowed down by those checks.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/vmx/nested.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

Comments

Paolo Bonzini July 10, 2019, 4:33 p.m. UTC | #1
On 07/07/19 09:11, Krish Sadhukhan wrote:
>   ..so that every nested vmentry is not slowed down by those checks.
> 
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>

Here I think only the EFER check needs to be done always (before it
refers to GUEST_CR0 which is shadowed).

Paolo

> ---
>  arch/x86/kvm/vmx/nested.c | 20 ++++++++++++++++----
>  1 file changed, 16 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index b610f389a01b..095923b1d765 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -2748,10 +2748,23 @@ static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
>  	return 0;
>  }
>  
> +static int nested_vmx_check_guest_state_full(struct kvm_vcpu *vcpu,
> +					     struct vmcs12 *vmcs12,
> +					     u32 *exit_qual)
> +{
> +	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
> +	    (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
> +	     (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
>  static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
>  					struct vmcs12 *vmcs12,
>  					u32 *exit_qual)
>  {
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	bool ia32e;
>  
>  	*exit_qual = ENTRY_FAIL_DEFAULT;
> @@ -2788,10 +2801,9 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
>  			return -EINVAL;
>  	}
>  
> -	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
> -	    (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
> -	     (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
> -		return -EINVAL;
> +	if (vmx->nested.dirty_vmcs12 &&
> +	    nested_vmx_check_guest_state_full(vcpu, vmcs12, exit_qual))
> +			return -EINVAL;
>  
>  	if (nested_check_guest_non_reg_state(vmcs12))
>  		return -EINVAL;
>
Krish Sadhukhan July 10, 2019, 7:34 p.m. UTC | #2
On 7/10/19 9:33 AM, Paolo Bonzini wrote:
> On 07/07/19 09:11, Krish Sadhukhan wrote:
>>    ..so that every nested vmentry is not slowed down by those checks.
>>
>> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> Here I think only the EFER check needs to be done always (before it
> refers to GUEST_CR0 which is shadowed).


Should I send v2 ?

Also, I forgot to add the following to the patchset,

            Suggested-by: Paolo Bonzini <pbonzini@redhat.com>

>
> Paolo
>
>> ---
>>   arch/x86/kvm/vmx/nested.c | 20 ++++++++++++++++----
>>   1 file changed, 16 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
>> index b610f389a01b..095923b1d765 100644
>> --- a/arch/x86/kvm/vmx/nested.c
>> +++ b/arch/x86/kvm/vmx/nested.c
>> @@ -2748,10 +2748,23 @@ static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
>>   	return 0;
>>   }
>>   
>> +static int nested_vmx_check_guest_state_full(struct kvm_vcpu *vcpu,
>> +					     struct vmcs12 *vmcs12,
>> +					     u32 *exit_qual)
>> +{
>> +	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
>> +	    (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
>> +	     (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
>> +		return -EINVAL;
>> +
>> +	return 0;
>> +}
>> +
>>   static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
>>   					struct vmcs12 *vmcs12,
>>   					u32 *exit_qual)
>>   {
>> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
>>   	bool ia32e;
>>   
>>   	*exit_qual = ENTRY_FAIL_DEFAULT;
>> @@ -2788,10 +2801,9 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
>>   			return -EINVAL;
>>   	}
>>   
>> -	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
>> -	    (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
>> -	     (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
>> -		return -EINVAL;
>> +	if (vmx->nested.dirty_vmcs12 &&
>> +	    nested_vmx_check_guest_state_full(vcpu, vmcs12, exit_qual))
>> +			return -EINVAL;
>>   
>>   	if (nested_check_guest_non_reg_state(vmcs12))
>>   		return -EINVAL;
>>

Patch
diff mbox series

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b610f389a01b..095923b1d765 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2748,10 +2748,23 @@  static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
 	return 0;
 }
 
+static int nested_vmx_check_guest_state_full(struct kvm_vcpu *vcpu,
+					     struct vmcs12 *vmcs12,
+					     u32 *exit_qual)
+{
+	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
+	    (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
+	     (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 					struct vmcs12 *vmcs12,
 					u32 *exit_qual)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	bool ia32e;
 
 	*exit_qual = ENTRY_FAIL_DEFAULT;
@@ -2788,10 +2801,9 @@  static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 			return -EINVAL;
 	}
 
-	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
-	    (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
-	     (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
-		return -EINVAL;
+	if (vmx->nested.dirty_vmcs12 &&
+	    nested_vmx_check_guest_state_full(vcpu, vmcs12, exit_qual))
+			return -EINVAL;
 
 	if (nested_check_guest_non_reg_state(vmcs12))
 		return -EINVAL;