@@ -1464,14 +1464,10 @@ static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool wri
return ES_OK;
}
-static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+static enum es_result __vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write)
{
struct pt_regs *regs = ctxt->regs;
enum es_result ret;
- bool write;
-
- /* Is it a WRMSR? */
- write = ctxt->insn.opcode.bytes[1] == 0x30;
switch (regs->cx) {
case MSR_SVSM_CAA:
@@ -1501,6 +1497,40 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
return ret;
}
+static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
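+ /* Opcode bytes 0F 30 encode WRMSR; 0F 32 encode RDMSR */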
+ return __vc_handle_msr(ghcb, ctxt, ctxt->insn.opcode.bytes[1] == 0x30);
+}
+
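+/*
+ * Emulate a WRMSR #VC exit for an APIC register through the shared
+ * GHCB, so that the write reaches the hypervisor. @reg is the xAPIC
+ * register offset; it is translated to the corresponding x2APIC MSR.
+ * A failed write terminates the SNP guest.
+ */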
+void savic_ghcb_msr_write(u32 reg, u64 value)
+{
+ u64 msr = APIC_BASE_MSR + (reg >> 4);
+ struct pt_regs regs = {
+ .cx = msr,
+ .ax = lower_32_bits(value),
+ .dx = upper_32_bits(value)
+ };
+ struct es_em_ctxt ctxt = { .regs = &regs };
+ struct ghcb_state state;
+ unsigned long flags;
+ enum es_result ret;
+ struct ghcb *ghcb;
+
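+ /* The per-CPU GHCB must be used with interrupts disabled */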
+ local_irq_save(flags);
+ ghcb = __sev_get_ghcb(&state);
+ vc_ghcb_invalidate(ghcb);
+
+ ret = __vc_handle_msr(ghcb, &ctxt, true);
+ if (ret != ES_OK) {
+ pr_err("Secure AVIC MSR (0x%llx) write returned error (%d)\n", msr, ret);
+ /* MSR writes should never fail. Any failure is a fatal error for the SNP guest. */
+ snp_abort();
+ }
+
+ __sev_put_ghcb(&state);
+ local_irq_restore(flags);
+}
+
enum es_result savic_register_gpa(u64 gpa)
{
struct ghcb_state state;
@@ -484,6 +484,7 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
void __init snp_secure_tsc_prepare(void);
void __init snp_secure_tsc_init(void);
enum es_result savic_register_gpa(u64 gpa);
+void savic_ghcb_msr_write(u32 reg, u64 value);
#else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -528,6 +529,7 @@ static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_
static inline void __init snp_secure_tsc_prepare(void) { }
static inline void __init snp_secure_tsc_init(void) { }
static inline enum es_result savic_register_gpa(u64 gpa) { return ES_UNSUPPORTED; }
+static inline void savic_ghcb_msr_write(u32 reg, u64 value) { }
#endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -46,6 +46,25 @@ static __always_inline void set_reg(unsigned int offset, u32 val)
#define SAVIC_ALLOWED_IRR 0x204
+static inline void update_vector(unsigned int cpu, unsigned int offset,
+ unsigned int vector, bool set)
+{
+ struct apic_page *ap = per_cpu_ptr(apic_page, cpu);
+ unsigned long *reg = (unsigned long *) &ap->bytes[offset];
+ unsigned int bit;
+
+ /*
+ * The registers are 32-bit wide and 16-byte aligned.
+ * Compensate for the resulting bit number spacing.
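+ * For example, vector 32 lands on bit 128, the first bit of the
+ * next 16-byte register.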
+ */
+ bit = vector + 96 * (vector / 32);
+
+ if (set)
+ set_bit(bit, reg);
+ else
+ clear_bit(bit, reg);
+}
+
static u32 x2apic_savic_read(u32 reg)
{
/*
@@ -109,6 +128,17 @@ static u32 x2apic_savic_read(u32 reg)
#define SAVIC_NMI_REQ 0x278
+static inline void self_ipi_reg_write(unsigned int vector)
+{
+ /*
+ * Secure AVIC hardware accelerates the guest's MSR write to the
+ * SELF_IPI register: it updates the IRR in the APIC backing page,
+ * evaluates the new IRR for interrupt injection, and continues with
+ * guest code execution.
+ */
+ native_apic_msr_write(APIC_SELF_IPI, vector);
+}
+
static void x2apic_savic_write(u32 reg, u32 data)
{
switch (reg) {
@@ -117,7 +147,6 @@ static void x2apic_savic_write(u32 reg, u32 data)
case APIC_LVT1:
case APIC_TMICT:
case APIC_TDCR:
- case APIC_SELF_IPI:
case APIC_TASKPRI:
case APIC_EOI:
case APIC_SPIV:
@@ -133,6 +162,9 @@ static void x2apic_savic_write(u32 reg, u32 data)
case APIC_EILVTn(0) ... APIC_EILVTn(3):
set_reg(reg, data);
break;
+ case APIC_SELF_IPI:
+ self_ipi_reg_write(data);
+ break;
/* ALLOWED_IRR offsets are writable */
case SAVIC_ALLOWED_IRR ... SAVIC_ALLOWED_IRR + 0x70:
if (IS_ALIGNED(reg - SAVIC_ALLOWED_IRR, 16)) {
@@ -145,62 +177,126 @@ static void x2apic_savic_write(u32 reg, u32 data)
}
}
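+
+/*
+ * Send an IPI to a CPU by setting the vector in the IRR of the target
+ * CPU's APIC backing page. The hypervisor is notified afterwards via
+ * the GHCB write in x2apic_savic_icr_write(), so that it can deliver
+ * the interrupt to the target vCPU.
+ */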
+static inline void send_ipi_dest(unsigned int cpu, unsigned int vector)
+{
+ update_vector(cpu, APIC_IRR, vector, true);
+}
+
+static void send_ipi_allbut(unsigned int vector)
+{
+ unsigned int cpu, src_cpu;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ src_cpu = raw_smp_processor_id();
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (cpu == src_cpu)
+ continue;
+ send_ipi_dest(cpu, vector);
+ }
+
+ local_irq_restore(flags);
+}
+
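+/*
+ * Write the ICR with the APIC_DEST_SELF shorthand. Such writes are
+ * handled by the Secure AVIC hardware itself, so no GHCB notification
+ * is required (see x2apic_savic_icr_write()).
+ */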
+static inline void self_ipi(unsigned int vector)
+{
+ u32 icr_low = APIC_DEST_SELF | vector;
+
+ native_x2apic_icr_write(icr_low, 0);
+}
+
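+/*
+ * Emulate an ICR write: update the IRR in the backing page(s) of the
+ * destination CPU(s), then forward the ICR value to the hypervisor
+ * via the GHCB so that the interrupt can be delivered. Self IPIs are
+ * excluded from the GHCB write, as the hardware handles them directly.
+ */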
+static void x2apic_savic_icr_write(u32 icr_low, u32 icr_high)
+{
+ unsigned int dsh, vector;
+ u64 icr_data;
+
+ dsh = icr_low & APIC_DEST_ALLBUT;
+ vector = icr_low & APIC_VECTOR_MASK;
+
+ switch (dsh) {
+ case APIC_DEST_SELF:
+ self_ipi(vector);
+ break;
+ case APIC_DEST_ALLINC:
+ self_ipi(vector);
+ fallthrough;
+ case APIC_DEST_ALLBUT:
+ send_ipi_allbut(vector);
+ break;
+ default:
+ send_ipi_dest(icr_high, vector);
+ break;
+ }
+
+ icr_data = ((u64)icr_high) << 32 | icr_low;
+ if (dsh != APIC_DEST_SELF)
+ savic_ghcb_msr_write(APIC_ICR, icr_data);
+}
+
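+/* Compose the ICR value for a fixed, physical-destination IPI */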
+static void send_ipi(u32 dest, unsigned int vector, unsigned int dsh)
+{
+ unsigned int icr_low;
+
+ icr_low = __prepare_ICR(dsh, vector, APIC_DEST_PHYSICAL);
+ x2apic_savic_icr_write(icr_low, dest);
+}
+
static void x2apic_savic_send_ipi(int cpu, int vector)
{
u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
- /* x2apic MSRs are special and need a special fence: */
- weak_wrmsr_fence();
- __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
+ send_ipi(dest, vector, 0);
}
-static void __send_ipi_mask(const struct cpumask *mask, int vector, bool excl_self)
+static void send_ipi_mask(const struct cpumask *mask, unsigned int vector, bool excl_self)
{
- unsigned long query_cpu;
- unsigned long this_cpu;
+ unsigned int this_cpu;
+ unsigned int cpu;
unsigned long flags;
- /* x2apic MSRs are special and need a special fence: */
- weak_wrmsr_fence();
-
local_irq_save(flags);
- this_cpu = smp_processor_id();
- for_each_cpu(query_cpu, mask) {
- if (excl_self && this_cpu == query_cpu)
+ this_cpu = raw_smp_processor_id();
+
+ for_each_cpu(cpu, mask) {
+ if (excl_self && cpu == this_cpu)
continue;
- __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
- vector, APIC_DEST_PHYSICAL);
+ send_ipi(per_cpu(x86_cpu_to_apicid, cpu), vector, 0);
}
+
local_irq_restore(flags);
}
static void x2apic_savic_send_ipi_mask(const struct cpumask *mask, int vector)
{
- __send_ipi_mask(mask, vector, false);
+ send_ipi_mask(mask, vector, false);
}
static void x2apic_savic_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
- __send_ipi_mask(mask, vector, true);
+ send_ipi_mask(mask, vector, true);
}
-static void x2apic_savic_update_vector(unsigned int cpu, unsigned int vector, bool set)
+static void x2apic_savic_send_ipi_allbutself(int vector)
{
- struct apic_page *ap = per_cpu_ptr(apic_page, cpu);
- unsigned long *sirr = (unsigned long *) &ap->bytes[SAVIC_ALLOWED_IRR];
- unsigned int bit;
+ send_ipi(0, vector, APIC_DEST_ALLBUT);
+}
- /*
- * The registers are 32-bit wide and 16-byte aligned.
- * Compensate for the resulting bit number spacing.
- */
- bit = vector + 96 * (vector / 32);
+static void x2apic_savic_send_ipi_all(int vector)
+{
+ send_ipi(0, vector, APIC_DEST_ALLINC);
+}
- if (set)
- set_bit(bit, sirr);
- else
- clear_bit(bit, sirr);
+static void x2apic_savic_send_ipi_self(int vector)
+{
+ self_ipi_reg_write(vector);
+}
+
+static void x2apic_savic_update_vector(unsigned int cpu, unsigned int vector, bool set)
+{
+ update_vector(cpu, SAVIC_ALLOWED_IRR, vector, set);
}
static void init_apic_page(void)
@@ -279,16 +375,16 @@ static struct apic apic_x2apic_savic __ro_after_init = {
.send_IPI = x2apic_savic_send_ipi,
.send_IPI_mask = x2apic_savic_send_ipi_mask,
.send_IPI_mask_allbutself = x2apic_savic_send_ipi_mask_allbutself,
- .send_IPI_allbutself = x2apic_send_IPI_allbutself,
- .send_IPI_all = x2apic_send_IPI_all,
- .send_IPI_self = x2apic_send_IPI_self,
+ .send_IPI_allbutself = x2apic_savic_send_ipi_allbutself,
+ .send_IPI_all = x2apic_savic_send_ipi_all,
+ .send_IPI_self = x2apic_savic_send_ipi_self,
.nmi_to_offline_cpu = true,
.read = x2apic_savic_read,
.write = x2apic_savic_write,
.eoi = native_apic_msr_eoi,
.icr_read = native_x2apic_icr_read,
- .icr_write = native_x2apic_icr_write,
+ .icr_write = x2apic_savic_icr_write,
.update_vector = x2apic_savic_update_vector,
};