[2/4,v2] KVM: nSVM: Add check for reserved bits for CR3, CR4, DR6, DR7 and EFER to svm_set_nested_state()

Message ID 20200928072043.9359-3-krish.sadhukhan@oracle.com (mailing list archive)
Series KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits

Commit Message

Krish Sadhukhan Sept. 28, 2020, 7:20 a.m. UTC
The SVM_SET_NESTED_STATE path needs the same reserved-bit checks on CR3, CR4,
DR6, DR7 and EFER that the VMRUN path performs for a nested guest. This patch
adds those missing checks to svm_set_nested_state().
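
For reference, here is a rough sketch (not part of this patch) of the
userspace side that ends up in svm_set_nested_state(); it assumes the
nested-state uapi of this kernel generation, and restore_nested_state()
with its parameters is purely illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Feed saved nested-SVM state back into a vCPU, e.g. on the target of a
 * live migration.  This ioctl lands in svm_set_nested_state(); with this
 * patch it now fails with -EINVAL if the saved CR3, CR4, DR6, DR7 or EFER
 * carry reserved bits, mirroring the checks the VMRUN path applies.
 */
static int restore_nested_state(int vcpu_fd, __u64 vmcb12_gpa,
				const __u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE])
{
	struct {
		struct kvm_nested_state hdr;
		struct kvm_svm_nested_state_data data;	/* vmcb12 image */
	} state;

	memset(&state, 0, sizeof(state));
	state.hdr.format = KVM_STATE_NESTED_FORMAT_SVM;
	state.hdr.size = sizeof(state);
	state.hdr.flags = KVM_STATE_NESTED_GUEST_MODE;
	state.hdr.hdr.svm.vmcb_pa = vmcb12_gpa;	/* guest-physical, page aligned */
	memcpy(state.data.vmcb12, vmcb12, sizeof(state.data.vmcb12));

	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, &state.hdr);
}

Factoring the register checks into nested_vmcb_check_cr3_cr4() lets the
VMRUN path and this ioctl path share one validation routine, so the two
cannot drift apart.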

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/svm/nested.c | 55 +++++++++++++++++++++++----------------
 1 file changed, 33 insertions(+), 22 deletions(-)

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index e90bc436f584..55c785587bd4 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -215,9 +215,35 @@  static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 	return true;
 }
 
+static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
+				      struct vmcb_save_area *save)
+{
+	bool nested_vmcb_lma =
+	        (save->efer & EFER_LME) &&
+		(save->cr0 & X86_CR0_PG);
+
+	if (!nested_vmcb_lma) {
+		if (save->cr4 & X86_CR4_PAE) {
+			if (save->cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
+				return false;
+		} else {
+			if (save->cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
+				return false;
+		}
+	} else {
+		if (!(save->cr4 & X86_CR4_PAE) ||
+		    !(save->cr0 & X86_CR0_PE) ||
+		    (save->cr3 & MSR_CR3_LONG_MBZ_MASK))
+			return false;
+	}
+	if (kvm_valid_cr4(&svm->vcpu, save->cr4))
+		return false;
+
+	return true;
+}
+
 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
 {
-	bool nested_vmcb_lma;
 	if ((vmcb->save.efer & EFER_SVME) == 0)
 		return false;
 
@@ -228,25 +254,7 @@  static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
 	if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
 		return false;
 
-	nested_vmcb_lma =
-	        (vmcb->save.efer & EFER_LME) &&
-		(vmcb->save.cr0 & X86_CR0_PG);
-
-	if (!nested_vmcb_lma) {
-		if (vmcb->save.cr4 & X86_CR4_PAE) {
-			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
-				return false;
-		} else {
-			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
-				return false;
-		}
-	} else {
-		if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
-		    !(vmcb->save.cr0 & X86_CR0_PE) ||
-		    (vmcb->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
-			return false;
-	}
-	if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
+	if (!nested_vmcb_check_cr3_cr4(svm, &(vmcb->save)))
 		return false;
 
 	return nested_vmcb_check_controls(&vmcb->control);
@@ -1116,9 +1124,12 @@  static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	/*
 	 * Validate host state saved from before VMRUN (see
 	 * nested_svm_check_permissions).
-	 * TODO: validate reserved bits for all saved state.
 	 */
-	if (!(save.cr0 & X86_CR0_PG))
+	if (!(save.cr0 & X86_CR0_PG) ||
+	    !nested_vmcb_check_cr3_cr4(svm, &save) ||
+	    !kvm_dr6_valid(save.dr6) ||
+	    !kvm_dr7_valid(save.dr7) ||
+	    !kvm_valid_efer(vcpu, save.efer))
 		return -EINVAL;
 
 	/*