[1/2] KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests

Message ID 20200504223523.7166-2-krish.sadhukhan@oracle.com (mailing list archive)
State New, archived
Series KVM: nSVM: Check MBZ bits in CR3 and CR4 on vmrun of nested guests

Commit Message

Krish Sadhukhan May 4, 2020, 10:35 p.m. UTC
According to the section "Canonicalization and Consistency Checks" in APM
vol. 2, the following guest state is illegal:

    "Any MBZ bit of CR3 is set."
    "Any MBZ bit of CR4 is set."

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>

---
 arch/x86/kvm/svm/nested.c | 18 ++++++++++++++++++
 arch/x86/kvm/svm/svm.h    |  7 ++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)

Comments

Paolo Bonzini May 7, 2020, 1:46 p.m. UTC | #1
On 05/05/20 00:35, Krish Sadhukhan wrote:
> +	if (!(vmcb->save.efer & EFER_LMA)) {
> +		if (vmcb->save.cr4 & X86_CR4_PAE) {
> +			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
> +				return false;
> +		} else {
> +			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
> +				return false;
> +		}
> +		if (vmcb->save.cr4 & MSR_CR4_LEGACY_RESERVED_MASK)
> +			return false;
> +	} else {
> +		if ((vmcb->save.cr4 & X86_CR4_PAE) &&
> +		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
> +			return false;
> +		if (vmcb->save.cr4 & MSR_CR4_RESERVED_MASK)
> +			return false;
> +	}
> +
>  	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
>  		return false;
>  

I think checking LMA from the guest state is incorrect; the number of
bits in CR3 and CR4 remains 64 as long as the host processor is 64-bit.
This is, of course, unless you have reproduced on bare metal that a
hypervisor running in 32-bit mode ignores the top 32 bits.
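
A minimal sketch of the kind of check this suggests, assuming a 64-bit
host and an illustrative 52-bit physical address width (the helper name
and masks are hypothetical, not the actual KVM code):

/*
 * Sketch only: the upper reserved bits are checked unconditionally,
 * since CR3 stays 64 bits wide on a 64-bit host regardless of the
 * guest's EFER.LMA; only the low reserved bits vary with the paging
 * mode.  The 52-bit physical address width is an assumption here.
 */
static bool nested_cr3_rsvd_ok(struct vmcb *vmcb)
{
	u64 cr3 = vmcb->save.cr3;

	/* Bits above the physical address width are always reserved. */
	if (cr3 & ~GENMASK_ULL(51, 0))
		return false;

	/* Legacy PAE paging: only bits 2:0 are MBZ in the low word. */
	if (!(vmcb->save.efer & EFER_LMA) && (vmcb->save.cr4 & X86_CR4_PAE))
		return !(cr3 & 0x7ULL);

	/* Otherwise bits 11:5 and 2:0 are MBZ, as in the patch's masks. */
	return !(cr3 & 0xfe7ULL);
}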

Also, the checks for CR4 must use the guest's reserved bits, via
kvm_valid_cr4.  However, this can be a bit slow, so it is probably a
good idea to cache the bits in kvm_update_cpuid.
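
A sketch of that caching idea, recomputed when the guest's CPUID is
updated; the field name cr4_guest_rsvd_bits and the (abbreviated)
feature list below are assumptions, not the actual code:

/* Sketch: recompute the guest's reserved CR4 bits whenever CPUID changes. */
static void kvm_update_cr4_rsvd_bits(struct kvm_vcpu *vcpu)
{
	u64 rsvd = CR4_RESERVED_BITS;

	/* Bits the guest may set only if the matching CPUID feature is exposed. */
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
		rsvd |= X86_CR4_PCIDE;
	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP))
		rsvd |= X86_CR4_SMEP;
	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP))
		rsvd |= X86_CR4_SMAP;
	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57))
		rsvd |= X86_CR4_LA57;
	/* ... remaining feature-dependent bits elided ... */

	vcpu->arch.cr4_guest_rsvd_bits = rsvd;	/* hypothetical cached field */
}

The nested CR4 check then reduces to a single test against the cached
mask, e.g. if (vmcb->save.cr4 & svm->vcpu.arch.cr4_guest_rsvd_bits)
return false;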

Thanks,

Paolo

> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index df3474f..796c083 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -354,7 +354,12 @@ static inline bool gif_set(struct vcpu_svm *svm)
>  }
>  
>  /* svm.c */
> -#define MSR_INVALID			0xffffffffU
> +#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
> +#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
> +#define MSR_CR3_LONG_RESERVED_MASK		0xfff0000000000fe7U
> +#define MSR_CR4_LEGACY_RESERVED_MASK		0xffbaf000U
> +#define MSR_CR4_RESERVED_MASK			0xffffffffffbaf000U
> +#define MSR_INVALID				0xffffffffU
>

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 90a1ca9..1804a97 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -207,6 +207,24 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 	if ((vmcb->save.efer & EFER_SVME) == 0)
 		return false;
 
+	if (!(vmcb->save.efer & EFER_LMA)) {
+		if (vmcb->save.cr4 & X86_CR4_PAE) {
+			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
+				return false;
+		} else {
+			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
+				return false;
+		}
+		if (vmcb->save.cr4 & MSR_CR4_LEGACY_RESERVED_MASK)
+			return false;
+	} else {
+		if ((vmcb->save.cr4 & X86_CR4_PAE) &&
+		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
+			return false;
+		if (vmcb->save.cr4 & MSR_CR4_RESERVED_MASK)
+			return false;
+	}
+
 	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
 		return false;
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index df3474f..796c083 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -354,7 +354,12 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_INVALID			0xffffffffU
+#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
+#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
+#define MSR_CR3_LONG_RESERVED_MASK		0xfff0000000000fe7U
+#define MSR_CR4_LEGACY_RESERVED_MASK		0xffbaf000U
+#define MSR_CR4_RESERVED_MASK			0xffffffffffbaf000U
+#define MSR_INVALID				0xffffffffU
 
 u32 svm_msrpm_offset(u32 msr);
 void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);