[3/7] KVM: nVMX: Remove param indirection from nested_vmx_check_msr_switch()

Message ID 20181130190438.13591-4-krish.sadhukhan@oracle.com
State New, archived
Series [1/7] KVM: nVMX: Prepend "nested_" to check_vmentry_{pre,post}reqs()

Commit Message

Krish Sadhukhan Nov. 30, 2018, 7:04 p.m. UTC
From: Sean Christopherson <sean.j.christopherson@intel.com>

Passing the enum and doing an indirect lookup is silly when we can
simply pass the field directly.  Remove the "fast path" code in
nested_vmx_check_msr_switch_controls() as it's now nothing more than a
redundant check.

Remove the debug message rather than continuing to pass the enum for the
address field.  Having debug messages for the MSRs themselves is useful
as MSR legality is a huge space, whereas messing up a physical address
means the VMM is fundamentally broken.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx.c | 34 ++++++++++------------------------
 1 file changed, 10 insertions(+), 24 deletions(-)
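
For context on the address check the helper performs: it rejects any MSR switch area that is misaligned or that extends past the CPU's physical address width. Below is a minimal standalone sketch (plain userspace C, not KVM code) of that logic, assuming the 16-byte vmx_msr_entry layout; the check_msr_switch() wrapper here is hypothetical, and maxphyaddr, which KVM would obtain via cpuid_maxphyaddr(vcpu), is passed in directly for illustration.

#include <stdint.h>
#include <stdio.h>

/* 16 bytes per entry, matching the VMX-defined MSR-load/store format. */
struct vmx_msr_entry {
	uint32_t index;
	uint32_t reserved;
	uint64_t value;
};

/* Hypothetical standalone version of the check done in the patch. */
static int check_msr_switch(uint32_t count, uint64_t addr, int maxphyaddr)
{
	if (count == 0)
		return 0;	/* an empty list is trivially valid */

	/* Base must be 16-byte aligned and below 2^maxphyaddr. */
	if ((addr & 0xf) || addr >> maxphyaddr)
		return -1;

	/* The last byte of the area must also be addressable. */
	if ((addr + (uint64_t)count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -1;

	return 0;
}

int main(void)
{
	/* Assume a 46-bit physical address width for the examples. */
	printf("%d\n", check_msr_switch(2, 0x1000, 46));     /* 0:  valid      */
	printf("%d\n", check_msr_switch(2, 0x1008, 46));     /* -1: misaligned */
	printf("%d\n", check_msr_switch(1, 1ULL << 46, 46)); /* -1: too high   */
	return 0;
}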

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 27892e8..7f891f1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -12253,45 +12253,31 @@  static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 }
 
 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
-				       unsigned long count_field,
-				       unsigned long addr_field)
+				       u32 count, u64 addr)
 {
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	int maxphyaddr;
-	u64 count, addr;
 
-	if (vmcs12_read_any(vmcs12, count_field, &count) ||
-	    vmcs12_read_any(vmcs12, addr_field, &addr)) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
 	if (count == 0)
 		return 0;
 	maxphyaddr = cpuid_maxphyaddr(vcpu);
 	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
-	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
-		pr_debug_ratelimited(
-			"nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
-			addr_field, maxphyaddr, count, addr);
+	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
 		return -EINVAL;
-	}
+
 	return 0;
 }
 
 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
 						struct vmcs12 *vmcs12)
 {
-	if (vmcs12->vm_exit_msr_load_count == 0 &&
-	    vmcs12->vm_exit_msr_store_count == 0 &&
-	    vmcs12->vm_entry_msr_load_count == 0)
-		return 0; /* Fast path */
-	if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
-					VM_EXIT_MSR_LOAD_ADDR) ||
-	    nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
-					VM_EXIT_MSR_STORE_ADDR) ||
-	    nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
-					VM_ENTRY_MSR_LOAD_ADDR))
+	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
+					vmcs12->vm_exit_msr_load_addr) ||
+	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
+					vmcs12->vm_exit_msr_store_addr) ||
+	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
+					vmcs12->vm_entry_msr_load_addr))
 		return -EINVAL;
+
 	return 0;
 }