@@ -192,14 +192,22 @@ void noinstr __sev_es_ist_exit(void)
this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}
-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state,
+ unsigned long *flags)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
+ /*
+ * Nothing shall interrupt this code path while holding the per-cpu
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ */
+ local_irq_save(*flags);
+
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
+
if (unlikely(data->ghcb_active)) {
/* GHCB is already in use - save its contents */
@@ -479,7 +488,8 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+static __always_inline void sev_es_put_ghcb(struct ghcb_state *state,
+ unsigned long flags)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
@@ -500,14 +510,17 @@ static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
vc_ghcb_invalidate(ghcb);
data->ghcb_active = false;
}
+
+ local_irq_restore(flags);
}
void noinstr __sev_es_nmi_complete(void)
{
struct ghcb_state state;
+ unsigned long flags;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = sev_es_get_ghcb(&state, &flags);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
@@ -517,7 +530,7 @@ void noinstr __sev_es_nmi_complete(void)
sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
VMGEXIT();
- sev_es_put_ghcb(&state);
+ sev_es_put_ghcb(&state, flags);
}
static u64 get_jump_table_addr(void)
@@ -527,9 +540,7 @@ static u64 get_jump_table_addr(void)
struct ghcb *ghcb;
u64 ret = 0;
- local_irq_save(flags);
-
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = sev_es_get_ghcb(&state, &flags);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
@@ -543,9 +554,7 @@ static u64 get_jump_table_addr(void)
ghcb_sw_exit_info_2_is_valid(ghcb))
ret = ghcb->save.sw_exit_info_2;
- sev_es_put_ghcb(&state);
-
- local_irq_restore(flags);
+ sev_es_put_ghcb(&state, flags);
return ret;
}
@@ -666,9 +675,10 @@ static bool __init sev_es_setup_ghcb(void)
static void sev_es_ap_hlt_loop(void)
{
struct ghcb_state state;
+ unsigned long flags;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = sev_es_get_ghcb(&state, &flags);
while (true) {
vc_ghcb_invalidate(ghcb);
@@ -685,7 +695,7 @@ static void sev_es_ap_hlt_loop(void)
break;
}
- sev_es_put_ghcb(&state);
+ sev_es_put_ghcb(&state, flags);
}
/*
@@ -1333,6 +1343,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
struct ghcb_state state;
struct es_em_ctxt ctxt;
enum es_result result;
+ unsigned long flags;
struct ghcb *ghcb;
/*
@@ -1353,7 +1364,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
* keep the IRQs disabled to protect us against concurrent TLB flushes.
*/
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = sev_es_get_ghcb(&state, &flags);
vc_ghcb_invalidate(ghcb);
result = vc_init_em_ctxt(&ctxt, regs, error_code);
@@ -1361,7 +1372,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
if (result == ES_OK)
result = vc_handle_exitcode(&ctxt, ghcb, error_code);
- sev_es_put_ghcb(&state);
+ sev_es_put_ghcb(&state, flags);
/* Done - now check the result */
switch (result) {