diff mbox series

[v2,3/6] nVMX x86: Re-organize the code in check_vmentry_prereqs() that is related to VM-Exit Control Fields

Message ID 20181115054554.4831-4-krish.sadhukhan@oracle.com (mailing list archive)
State New, archived
Headers show
Series [v2,1/6] nVMX x86: Re-name check_vmentry_pre[post]reqs to nested_check_vmentry_pre[post]reqs | expand

Commit Message

Krish Sadhukhan Nov. 15, 2018, 5:45 a.m. UTC
Separate out the checks in nested_check_vmentry_prereqs() that are related to
the VM-Exit Control Fields into a separate function. The re-organized code is
easier to read, enhance and maintain.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Mark Kanda <mark.kanda@oracle.com>
---
 arch/x86/kvm/vmx.c | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)

Comments

Sean Christopherson Nov. 15, 2018, 6:03 p.m. UTC | #1
On Thu, Nov 15, 2018 at 12:45:51AM -0500, Krish Sadhukhan wrote:
> Separate out the checks in nested_check_vmentry_prereqs(), that are related to
> the VM-Exit Control Fields, to a separate function. The re-organized code is
> easier for readability, enhancement and maintenance.
> 
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
> Reviewed-by: Mark Kanda <mark.kanda@oracle.com>
> ---
>  arch/x86/kvm/vmx.c | 32 ++++++++++++++++++++++++++------
>  1 file changed, 26 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 25eb74e..72bfc58 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -11752,18 +11752,26 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>  
> -static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
> +static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
>  						struct vmcs12 *vmcs12)
>  {
>  	if (vmcs12->vm_exit_msr_load_count == 0 &&
> -	    vmcs12->vm_exit_msr_store_count == 0 &&
> -	    vmcs12->vm_entry_msr_load_count == 0)
> +	    vmcs12->vm_exit_msr_store_count == 0)
>  		return 0; /* Fast path */
>  	if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
>  					VM_EXIT_MSR_LOAD_ADDR) ||
>  	    nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
> -					VM_EXIT_MSR_STORE_ADDR) ||
> -	    nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
> +					VM_EXIT_MSR_STORE_ADDR))
> +		return -EINVAL;
> +	return 0;
> +}
> +
> +static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
> +						struct vmcs12 *vmcs12)

Having both nested_vmx_check_exit_msr_switch_controls() and
nested_vmx_check_entry_msr_switch_controls() in addition to the higher
nested_check_vm_{exit,entry}_controls() and lower
nested_vmx_check_msr_switch() seems a bit silly, especially since the
point of the original code was to handle the case where all three lists
are empty.  Splitting the checks means the "Fast path" isn't really a
fast path and is simply redundant with nested_vmx_check_msr_switch().
And at that point I don't see why we'd still pass the field enums as
opposed to the field values.  So, what about simplifying
nested_vmx_check_msr_switch() in an earlier patch:

From 863424efe9dd9adda005c7502e92173fcc1e6bb4 Mon Sep 17 00:00:00 2001
From: Sean Christopherson <sean.j.christopherson@intel.com>
Date: Thu, 15 Nov 2018 09:55:05 -0800
Subject: [PATCH] KVM: nVMX: Remove param indirection from
 nested_vmx_check_msr_switch()

Passing the enum and doing an indirect lookup is silly when we can
simply pass the field directly.  Remove the "fast path" code in
nested_vmx_check_msr_switch_controls() as it's now nothing more than a
redundant check.

Remove the debug message rather than continue passing the enum for the
address field.  Having debug messages for the MSRs themselves is useful
as MSR legality is a huge space, whereas messing up a physical address
means the VMM is fundamentally broken.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx.c | 32 ++++++++------------------------
 1 file changed, 8 insertions(+), 24 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4555077d69ce..cbd4d4a8b80d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -12221,44 +12221,28 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 }

 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
-                                      unsigned long count_field,
-                                      unsigned long addr_field)
+                                      u32 count, u64 addr)
 {
-       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        int maxphyaddr;
-       u64 count, addr;

-       if (vmcs12_read_any(vmcs12, count_field, &count) ||
-           vmcs12_read_any(vmcs12, addr_field, &addr)) {
-               WARN_ON(1);
-               return -EINVAL;
-       }
        if (count == 0)
                return 0;
        maxphyaddr = cpuid_maxphyaddr(vcpu);
        if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
-           (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
-               pr_debug_ratelimited(
-                       "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
-                       addr_field, maxphyaddr, count, addr);
+           (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
                return -EINVAL;
-       }
        return 0;
 }

 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
                                                struct vmcs12 *vmcs12)
 {
-       if (vmcs12->vm_exit_msr_load_count == 0 &&
-           vmcs12->vm_exit_msr_store_count == 0 &&
-           vmcs12->vm_entry_msr_load_count == 0)
-               return 0; /* Fast path */
-       if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
-                                       VM_EXIT_MSR_LOAD_ADDR) ||
-           nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
-                                       VM_EXIT_MSR_STORE_ADDR) ||
-           nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
-                                       VM_ENTRY_MSR_LOAD_ADDR))
+       if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
+                                       vmcs12->vm_exit_msr_load_addr) ||
+           nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
+                                       vmcs12->vm_exit_msr_store_addr) ||
+           nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
+                                       vmcs12->vm_entry_msr_load_addr))
                return -EINVAL;
        return 0;
 }
--
2.19.1

> +{
> +	if (vmcs12->vm_entry_msr_load_count == 0)
> +		return 0; /* Fast path */
> +	if (nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
>  					VM_ENTRY_MSR_LOAD_ADDR))
>  		return -EINVAL;
>  	return 0;
> @@ -12432,13 +12440,25 @@ static int nested_check_vm_execution_ctls(struct kvm_vcpu *vcpu, struct vmcs12 *
>  	return 0;
>  }
>  
> +/*
> + * Checks related to VM-Exit Control Fields
> + */
> +static int nested_check_vm_exit_ctls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)

Same comment as the previous patch, spell out "controls" and wrap.

> +{
> +	if (nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
>  static int nested_check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  {
>  	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
>  	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
>  		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
>  
> -	if (nested_check_vm_execution_ctls(vcpu, vmcs12))
> +	if (nested_check_vm_execution_ctls(vcpu, vmcs12) ||
> +	    nested_check_vm_exit_ctls(vcpu, vmcs12))
>  		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
>  
>  	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
> -- 
> 2.9.5
>
diff mbox series

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 25eb74e..72bfc58 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11752,18 +11752,26 @@  static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
 						struct vmcs12 *vmcs12)
 {
 	if (vmcs12->vm_exit_msr_load_count == 0 &&
-	    vmcs12->vm_exit_msr_store_count == 0 &&
-	    vmcs12->vm_entry_msr_load_count == 0)
+	    vmcs12->vm_exit_msr_store_count == 0)
 		return 0; /* Fast path */
 	if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
 					VM_EXIT_MSR_LOAD_ADDR) ||
 	    nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
-					VM_EXIT_MSR_STORE_ADDR) ||
-	    nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
+					VM_EXIT_MSR_STORE_ADDR))
+		return -EINVAL;
+	return 0;
+}
+
+static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+						struct vmcs12 *vmcs12)
+{
+	if (vmcs12->vm_entry_msr_load_count == 0)
+		return 0; /* Fast path */
+	if (nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
 					VM_ENTRY_MSR_LOAD_ADDR))
 		return -EINVAL;
 	return 0;
@@ -12432,13 +12440,25 @@  static int nested_check_vm_execution_ctls(struct kvm_vcpu *vcpu, struct vmcs12 *
 	return 0;
 }
 
+/*
+ * Checks related to VM-Exit Control Fields
+ */
+static int nested_check_vm_exit_ctls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+	if (nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int nested_check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 {
 	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
 	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
-	if (nested_check_vm_execution_ctls(vcpu, vmcs12))
+	if (nested_check_vm_execution_ctls(vcpu, vmcs12) ||
+	    nested_check_vm_exit_ctls(vcpu, vmcs12))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
 	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))