@@ -120,6 +120,7 @@
#define SVM_VMGEXIT_HVDB_SET 1
#define SVM_VMGEXIT_HVDB_QUERY 2
#define SVM_VMGEXIT_HVDB_CLEAR 3
+#define SVM_VMGEXIT_HV_IPI 0x80000015
#define SVM_VMGEXIT_SNP_RUN_VMPL 0x80000018
#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
@@ -2417,7 +2417,7 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
gpa_t address, int len, const void *data)
{
- struct kvm_lapic *apic = to_lapic(this);
+ struct kvm_lapic *apic = this ? to_lapic(this) : vcpu->arch.apic;
unsigned int offset = address - apic->base_address;
u32 val;
@@ -3416,3 +3416,42 @@ void kvm_lapic_exit(void)
 	static_key_deferred_flush(&apic_sw_disabled);
 	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
 }
+
+/*
+ * Deliver an IPI on behalf of the guest from the full 64-bit ICR value
+ * in @data.  x2APIC: the ICR is a single 64-bit MSR, written directly.
+ * xAPIC: the ICR is split across two 32-bit MMIO registers; the
+ * destination (ICR2, the high dword) must be written before ICR low,
+ * whose write is what triggers delivery.
+ *
+ * Returns 0 on success, 1 on failure.
+ */
+int kvm_xapic_x2apic_send_ipi(struct kvm_vcpu *vcpu, u64 data)
+{
+	u32 icr_msr_addr = APIC_BASE_MSR + (APIC_ICR >> 4);
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	u32 icr_hi = upper_32_bits(data);
+	u32 icr_lo = lower_32_bits(data);
+
+	if (!kvm_lapic_enabled(vcpu))
+		return 1;
+
+	if (vcpu->arch.apic_base & X2APIC_ENABLE)
+		return kvm_x2apic_msr_write(vcpu, icr_msr_addr, data) ? 1 : 0;
+
+	/*
+	 * xAPIC: set the destination via ICR2 (high dword) first, then
+	 * write ICR (low dword) to send the IPI.  Using 32-bit locals
+	 * avoids handing a u64 buffer to a 4-byte MMIO write, which
+	 * would silently drop the destination field.
+	 */
+	if (apic_mmio_write(vcpu, NULL, apic->base_address + APIC_ICR2, 4,
+			    &icr_hi))
+		return 1;
+	if (apic_mmio_write(vcpu, NULL, apic->base_address + APIC_ICR, 4,
+			    &icr_lo))
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_xapic_x2apic_send_ipi);
@@ -140,6 +140,8 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
void kvm_lapic_exit(void);
+int kvm_xapic_x2apic_send_ipi(struct kvm_vcpu *vcpu, u64 data);
+
u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic);
#define VEC_POS(v) ((v) & (32 - 1))
@@ -34,6 +34,7 @@
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"
+#include "lapic.h"
#define GHCB_VERSION_MAX 2ULL
#define GHCB_VERSION_DEFAULT 2ULL
@@ -3417,6 +3418,10 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
if (!sev_snp_guest(vcpu->kvm))
goto vmgexit_err;
break;
+ case SVM_VMGEXIT_HV_IPI:
+ if (!sev_snp_guest(vcpu->kvm))
+ goto vmgexit_err;
+ break;
default:
reason = GHCB_ERR_INVALID_EVENT;
goto vmgexit_err;
@@ -4193,6 +4198,30 @@ static int sev_snp_hv_doorbell_page(struct vcpu_svm *svm)
 	return 0;
 }
+/*
+ * Handle SVM_VMGEXIT_HV_IPI: the guest places the full 64-bit ICR value
+ * in sw_exit_info_1 and asks the hypervisor to deliver the IPI for it.
+ *
+ * Returns 0 on success, -EINVAL on failure; the caller reports failure
+ * to the guest as GHCB_ERR_INVALID_INPUT.
+ */
+static int sev_snp_hv_ipi(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	u64 icr_info;
+
+	/* Defense in depth: sev_es_validate_vmgexit() also checks this. */
+	if (!sev_snp_guest(vcpu->kvm))
+		return -EINVAL;
+
+	icr_info = svm->vmcb->control.exit_info_1;
+
+	if (kvm_xapic_x2apic_send_ipi(vcpu, icr_info))
+		return -EINVAL;
+
+	return 0;
+}
+
static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4479,6 +4500,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
}
+ ret = 1;
+ break;
+ case SVM_VMGEXIT_HV_IPI:
+ if (sev_snp_hv_ipi(svm)) {
+ ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
+ ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
+ }
+
ret = 1;
break;
case SVM_VMGEXIT_UNSUPPORTED_EVENT: