
[RFC,2/7] KVM: x86: SVM: Emulate reads and writes to shadow stack MSRs

Message ID: 20221012203910.204793-3-john.allen@amd.com
State: New, archived
Series: SVM guest shadow stack support

Commit Message

John Allen Oct. 12, 2022, 8:39 p.m. UTC
Set up interception of shadow stack MSRs. If shadow stack is unsupported on
the host or the MSRs are otherwise inaccessible, the interception code
returns an error. In certain cases, such as host-initiated MSR reads or
writes, the interception code gets or sets the requested MSR value directly.

Signed-off-by: John Allen <john.allen@amd.com>
---
Adapted from: https://lore.kernel.org/all/20220616084643.19564-13-weijiang.yang@intel.com/
---
 arch/x86/kvm/svm/svm.c | 58 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)
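
Every interception path in the hunks below gates access through
kvm_cet_is_msr_accessible(), which comes from the Intel CET series linked
above and is not part of this patch. As orientation only, a minimal sketch of
the kind of check such a helper performs is shown here; the function name,
the exact CPUID gating, and the MSR_KVM_GUEST_SSP handling are illustrative
assumptions, not the actual implementation from either series.

/*
 * Illustrative sketch only: the real kvm_cet_is_msr_accessible() is
 * introduced by the Intel CET series referenced above. The name and the
 * exact feature gating below are assumptions for illustration.
 */
static bool cet_msr_accessible_sketch(struct kvm_vcpu *vcpu,
				      struct msr_data *msr)
{
	/* Host-initiated accesses only need KVM itself to support CET. */
	if (msr->host_initiated)
		return kvm_cpu_cap_has(X86_FEATURE_SHSTK) ||
		       kvm_cpu_cap_has(X86_FEATURE_IBT);

	switch (msr->index) {
	case MSR_KVM_GUEST_SSP:
		/* Synthetic MSR, intended for host (VMM) access only. */
		return false;
	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
	case MSR_IA32_INT_SSP_TAB:
		/* Shadow-stack-only MSRs. */
		return guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);
	default:
		/* U_CET/S_CET are shared by shadow stack and IBT. */
		return guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) ||
		       guest_cpuid_has(vcpu, X86_FEATURE_IBT);
	}
}

Once an access passes that check, the hunks below split the MSRs by where
their state lives: S_CET, INT_SSP_TAB, and the guest SSP are backed by VMCB
save area fields and are read/written there directly, while U_CET and
PL0_SSP..PL3_SSP are xsave-managed and go through kvm_get_xsave_msr() and
kvm_set_xsave_msr().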

Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f3813dbacb9f..1f31a991c745 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2764,6 +2764,31 @@  static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (guest_cpuid_is_intel(vcpu))
 			msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
 		break;
+	case MSR_IA32_S_CET:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		msr_info->data = svm->vmcb->save.s_cet;
+		break;
+	case MSR_IA32_U_CET:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		kvm_get_xsave_msr(msr_info);
+		break;
+	case MSR_IA32_INT_SSP_TAB:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		msr_info->data = svm->vmcb->save.isst_addr;
+		break;
+	case MSR_KVM_GUEST_SSP:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		msr_info->data = svm->vmcb->save.ssp;
+		break;
+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		kvm_get_xsave_msr(msr_info);
+		break;
 	case MSR_TSC_AUX:
 		msr_info->data = svm->tsc_aux;
 		break;
@@ -2995,6 +3020,39 @@  static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
 		svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
 		break;
+	case MSR_IA32_S_CET:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+		svm->vmcb->save.s_cet = data;
+		break;
+	case MSR_IA32_U_CET:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+		kvm_set_xsave_msr(msr);
+		break;
+	case MSR_IA32_INT_SSP_TAB:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+		if (is_noncanonical_address(data, vcpu))
+			return 1;
+		svm->vmcb->save.isst_addr = data;
+		break;
+	case MSR_KVM_GUEST_SSP:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+	/* SSP MSR values should be 4-byte aligned canonical addresses */
+		if ((data & GENMASK(1, 0)) || is_noncanonical_address(data, vcpu))
+			return 1;
+		svm->vmcb->save.ssp = data;
+		break;
+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+	/* SSP MSR values should be 4-byte aligned canonical addresses */
+		if ((data & GENMASK(1, 0)) || is_noncanonical_address(data, vcpu))
+			return 1;
+		kvm_set_xsave_msr(msr);
+		break;
 	case MSR_TSC_AUX:
 		/*
 		 * TSC_AUX is usually changed only during boot and never read