
[RFC,5/7] KVM: SVM: Prevent injection when restricted injection is active

Message ID 2e8bce9bf1b1f0a83e1afb78a61165f536c70cb4.1724795971.git.thomas.lendacky@amd.com
Series KVM: SEV-SNP support for running an SVSM

Commit Message

Tom Lendacky Aug. 27, 2024, 9:59 p.m. UTC
Prevent injection of exceptions/interrupts when restricted injection is
active. This is not full support for restricted injection, but it is
sufficient because the SVSM is not expecting any injections at all.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/kvm/svm/sev.c | 30 ++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c |  6 ++++++
 arch/x86/kvm/svm/svm.h |  3 +++
 3 files changed, 39 insertions(+)
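
For context, the new sev_snp_is_rinj_active() helper keys off the per-VMPL
SEV_FEATURES value tracked in sev->vmsa_features[] (indexed by
snp_current_vmpl, both introduced earlier in this series). Below is a minimal
sketch of the check: the SVM_SEV_FEAT_* values mirror the SEV_FEATURES bit
assignments from the SEV-SNP ABI (Restricted Injection is bit 3), while
vmpl_injection_blocked(), vmpl_features[] and current_vmpl are illustrative
stand-ins rather than the actual names used by the patch.

  /* SEV_FEATURES bit assignments, per the SEV-SNP ABI (shown for reference) */
  #define SVM_SEV_FEAT_SNP_ACTIVE		BIT(0)
  #define SVM_SEV_FEAT_RESTRICTED_INJECTION	BIT(3)
  #define SVM_SEV_FEAT_ALTERNATE_INJECTION	BIT(4)

  /*
   * Illustrative sketch only: vmpl_features[] and current_vmpl stand in for
   * sev->vmsa_features[] and svm->sev_es.snp_current_vmpl.  If the VMPL that
   * is about to run was started with Restricted Injection, the hypervisor
   * must not inject exceptions, interrupts or NMIs into it.
   */
  static inline bool vmpl_injection_blocked(const u64 *vmpl_features, int current_vmpl)
  {
  	return vmpl_features[current_vmpl] & SVM_SEV_FEAT_RESTRICTED_INJECTION;
  }

svm_nmi_blocked() and svm_interrupt_blocked() then short-circuit on this
check right after the GIF test, which is what the svm.c hunks below do.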

Patch

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index c6c9306c86ef..4324a72d35ea 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -5227,3 +5227,33 @@  int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 
 	return level;
 }
+
+bool sev_snp_is_rinj_active(struct kvm_vcpu *vcpu)
+{
+	struct kvm_sev_info *sev;
+	int vmpl;
+
+	if (!sev_snp_guest(vcpu->kvm))
+		return false;
+
+	sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+	vmpl = to_svm(vcpu)->sev_es.snp_current_vmpl;
+
+	return sev->vmsa_features[vmpl] & SVM_SEV_FEAT_RESTRICTED_INJECTION;
+}
+
+bool sev_snp_nmi_blocked(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(!sev_snp_is_rinj_active(vcpu));
+
+	/* NMIs are blocked when restricted injection is active */
+	return true;
+}
+
+bool sev_snp_interrupt_blocked(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(!sev_snp_is_rinj_active(vcpu));
+
+	/* Interrupts are blocked when restricted injection is active */
+	return true;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 586c26627bb1..632c74cb41f4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3780,6 +3780,9 @@  bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
 	if (!gif_set(svm))
 		return true;
 
+	if (sev_snp_is_rinj_active(vcpu))
+		return sev_snp_nmi_blocked(vcpu);
+
 	if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
 		return false;
 
@@ -3812,6 +3815,9 @@  bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
 	if (!gif_set(svm))
 		return true;
 
+	if (sev_snp_is_rinj_active(vcpu))
+		return sev_snp_interrupt_blocked(vcpu);
+
 	if (is_guest_mode(vcpu)) {
 		/* As long as interrupts are being delivered...  */
 		if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 55f1f6ffb871..029eb54a8472 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -761,6 +761,9 @@  void sev_es_vcpu_reset(struct vcpu_svm *svm);
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
 void sev_es_unmap_ghcb(struct vcpu_svm *svm);
+bool sev_snp_is_rinj_active(struct kvm_vcpu *vcpu);
+bool sev_snp_nmi_blocked(struct kvm_vcpu *vcpu);
+bool sev_snp_interrupt_blocked(struct kvm_vcpu *vcpu);
 
 #ifdef CONFIG_KVM_AMD_SEV
 int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);