[1/3] KVM: nSVM: Check addresses of MSR and IO bitmap

Message ID 20210113024633.8488-2-krish.sadhukhan@oracle.com (mailing list archive)
State New, archived
Series KVM: nSVM: Check addresses of MSR bitmap and IO bitmap tables on vmrun of nested guests

Commit Message

Krish Sadhukhan Jan. 13, 2021, 2:46 a.m. UTC
According to section "Canonicalization and Consistency Checks" in APM vol 2,
the following guest state is illegal:

    "The MSR or IOIO intercept tables extend to a physical address that
     is greater than or equal to the maximum supported physical address."

Also check that these addresses are aligned on a page boundary.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/svm/nested.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

Comments

Sean Christopherson Jan. 13, 2021, 5:43 p.m. UTC | #1
On Wed, Jan 13, 2021, Krish Sadhukhan wrote:
> According to section "Canonicalization and Consistency Checks" in APM vol 2,
> the following guest state is illegal:
> 
>     "The MSR or IOIO intercept tables extend to a physical address that
>      is greater than or equal to the maximum supported physical address."
> 
> Also check that these addresses are aligned on a page boundary.
> 
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> ---
>  arch/x86/kvm/svm/nested.c | 17 ++++++++++++++---
>  1 file changed, 14 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index cb4c6ee10029..389a8108ddb5 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -211,8 +211,11 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
>  	return true;
>  }
>  
> -static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
> +static bool nested_vmcb_check_controls(struct vcpu_svm *svm,

It's probably worth passing vcpu instead of svm.  svm_set_nested_state() already
takes vcpu, and nested_vmcb_checks() could easily do the same (in a separate
cleanup), especially if nested_svm_vmrun() were cleaned up to capture vcpu in a
local variable instead of constantly doing &svm->vcpu.
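
Something like this (completely untested, just to show the shape):

	static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					       struct vmcb_control_area *control)

with nested_vmcb_checks() doing

	return nested_vmcb_check_controls(&svm->vcpu, &vmcb12->control);

and svm_set_nested_state() passing its existing vcpu:

	if (!nested_vmcb_check_controls(vcpu, ctl))
		goto out_free;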

> +				       struct vmcb_control_area *control)
>  {
> +	int maxphyaddr;
> +
>  	if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
>  		return false;
>  
> @@ -223,6 +226,14 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
>  	    !npt_enabled)
>  		return false;
>  
> +	maxphyaddr = cpuid_maxphyaddr(&svm->vcpu);
> +	if (!IS_ALIGNED(control->msrpm_base_pa, PAGE_SIZE) ||
> +	    control->msrpm_base_pa >> maxphyaddr)

These can use page_address_valid().
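
E.g. (untested):

	if (!page_address_valid(&svm->vcpu, control->msrpm_base_pa) ||
	    !page_address_valid(&svm->vcpu, control->iopm_base_pa))
		return false;

which also lets you drop the local maxphyaddr.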

Unrelated to this patch, we really should consolidate all the different
open-coded variants of maxphyaddr checks to use kvm_vcpu_is_illegal_gpa(),
and maybe add a helper to do an arbitrary alignment check.  VMX has a handful of
checks that fit that pattern and aren't exactly intuitive at first glance.

We could add a kvm_vcpu_rsvd_gpa_bits() helper too.  Somehow even that has
multiple open-coded variants.

I'll add those cleanups to the todo list.
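
Something along these lines for the alignment helper (sketch only, name and
location TBD):

	/* e.g. in arch/x86/kvm/cpuid.h, next to kvm_vcpu_is_illegal_gpa() */
	static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
							 gpa_t gpa, gpa_t alignment)
	{
		return IS_ALIGNED(gpa, alignment) &&
		       !kvm_vcpu_is_illegal_gpa(vcpu, gpa);
	}

so checks like the ones above collapse to a single
kvm_vcpu_is_legal_aligned_gpa(vcpu, pa, PAGE_SIZE) call.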

> +		return false;
> +	if (!IS_ALIGNED(control->iopm_base_pa, PAGE_SIZE) ||
> +	    control->iopm_base_pa >> maxphyaddr)
> +		return false;
> +
>  	return true;
>  }
>  
> @@ -258,7 +269,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
>  	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
>  		return false;
>  
> -	return nested_vmcb_check_controls(&vmcb12->control);
> +	return nested_vmcb_check_controls(svm, &vmcb12->control);
>  }
>  
>  static void load_nested_vmcb_control(struct vcpu_svm *svm,
> @@ -1173,7 +1184,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
>  		goto out_free;
>  
>  	ret = -EINVAL;
> -	if (!nested_vmcb_check_controls(ctl))
> +	if (!nested_vmcb_check_controls(svm, ctl))
>  		goto out_free;
>  
>  	/*
> -- 
> 2.27.0
>

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index cb4c6ee10029..389a8108ddb5 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -211,8 +211,11 @@  static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
+static bool nested_vmcb_check_controls(struct vcpu_svm *svm,
+				       struct vmcb_control_area *control)
 {
+	int maxphyaddr;
+
 	if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
 		return false;
 
@@ -223,6 +226,14 @@  static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 	    !npt_enabled)
 		return false;
 
+	maxphyaddr = cpuid_maxphyaddr(&svm->vcpu);
+	if (!IS_ALIGNED(control->msrpm_base_pa, PAGE_SIZE) ||
+	    control->msrpm_base_pa >> maxphyaddr)
+		return false;
+	if (!IS_ALIGNED(control->iopm_base_pa, PAGE_SIZE) ||
+	    control->iopm_base_pa >> maxphyaddr)
+		return false;
+
 	return true;
 }
 
@@ -258,7 +269,7 @@  static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
 		return false;
 
-	return nested_vmcb_check_controls(&vmcb12->control);
+	return nested_vmcb_check_controls(svm, &vmcb12->control);
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -1173,7 +1184,7 @@  static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 		goto out_free;
 
 	ret = -EINVAL;
-	if (!nested_vmcb_check_controls(ctl))
+	if (!nested_vmcb_check_controls(svm, ctl))
 		goto out_free;
 
 	/*