
[RFC,v2,08/12] migration: Allow resetting the mirror vcpu to the MH entry point

Message ID 20210823141636.65975-9-dovmurik@linux.ibm.com (mailing list archive)
State New, archived
Series Confidential guest-assisted live migration

Commit Message

Dov Murik Aug. 23, 2021, 2:16 p.m. UTC
Add a function to reset the mirror vcpu so that it starts directly at the
entry point of the migration handler.

Note: In the patch below, the GDT and EIP values are hard-coded to match
the OVMF migration handler entry point implementation we currently have.
These values could instead be exposed in the OVMF GUID table and
discovered from there rather than being hard-coded here.
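
For illustration, a rough sketch of what such GUID-table discovery could
look like on the QEMU side. It assumes OVMF publishes the entry point and
GDT location under a dedicated GUID in its GUIDed table; the GUID string,
the MhInfoBlock layout and the cgs_mh_find_entry() helper are hypothetical,
while pc_system_ovmf_table_find() is the existing QEMU helper already used
to locate the SEV-ES reset block:

    #include "qemu/osdep.h"
    #include "hw/i386/pc.h"   /* pc_system_ovmf_table_find() */

    /*
     * Hypothetical GUID under which OVMF could publish its migration
     * handler info; no such entry exists today, this is a placeholder.
     */
    #define MH_INFO_BLOCK_GUID "00000000-0000-0000-0000-000000000000"

    /* Layout is an assumption; OVMF would have to define the real one. */
    typedef struct QEMU_PACKED MhInfoBlock {
        uint32_t mh_entry_gpa;  /* entry point (EIP) of the migration handler */
        uint32_t gdt_base_gpa;  /* base of the GDT the handler expects */
        uint16_t gdt_limit;
    } MhInfoBlock;

    static bool cgs_mh_find_entry(uint64_t *eip, uint64_t *gdt_base,
                                  uint32_t *gdt_limit)
    {
        uint8_t *data;
        int len;

        if (!pc_system_ovmf_table_find(MH_INFO_BLOCK_GUID, &data, &len) ||
            len < (int)sizeof(MhInfoBlock)) {
            /* firmware does not advertise a migration handler */
            return false;
        }

        MhInfoBlock *info = (MhInfoBlock *)data;
        *eip = info->mh_entry_gpa;
        *gdt_base = info->gdt_base_gpa;
        *gdt_limit = info->gdt_limit;
        return true;
    }

cgs_mh_reset_mirror_vcpu() would then load the discovered values into
env->gdt and env->eip instead of the 0x823600/0x0047/0x823000 constants
used below.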

Signed-off-by: Dov Murik <dovmurik@linux.ibm.com>
---
 migration/confidential-ram.h |   2 +
 migration/confidential-ram.c | 112 +++++++++++++++++++++++++++++++++++
 2 files changed, 114 insertions(+)

Patch

diff --git a/migration/confidential-ram.h b/migration/confidential-ram.h
index 9a1027bdaf..af046f95cc 100644
--- a/migration/confidential-ram.h
+++ b/migration/confidential-ram.h
@@ -18,4 +18,6 @@  int cgs_mh_save_encrypted_page(QEMUFile *f, ram_addr_t src_gpa, uint32_t size,
 
 int cgs_mh_load_encrypted_page(QEMUFile *f, ram_addr_t dest_gpa);
 
+void cgs_mh_reset_mirror_vcpu(CPUState *s);
+
 #endif
diff --git a/migration/confidential-ram.c b/migration/confidential-ram.c
index 30002448b9..6e41cba878 100644
--- a/migration/confidential-ram.c
+++ b/migration/confidential-ram.c
@@ -8,6 +8,8 @@ 
 #include "io/channel.h"
 #include "qapi/error.h"
 #include "exec/memory.h"
+#include "sysemu/kvm.h"
+#include "kvm/kvm_i386.h"
 #include "trace.h"
 #include "confidential-ram.h"
 
@@ -225,3 +227,113 @@  int cgs_mh_load_encrypted_page(QEMUFile *f, ram_addr_t dest_gpa)
     }
     return ret;
 }
+
+void cgs_mh_reset_mirror_vcpu(CPUState *s)
+{
+    X86CPU *cpu = X86_CPU(s);
+    CPUX86State *env = &cpu->env;
+    uint64_t xcr0;
+    int i;
+
+    memset(env, 0, offsetof(CPUX86State, end_reset_fields));
+
+    env->old_exception = -1;
+
+    /* init to reset state */
+
+    env->hflags2 |= HF2_GIF_MASK;
+    env->hflags &= ~HF_GUEST_MASK;
+    env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_PE_MASK | HF_MP_MASK;
+
+    cpu_x86_update_cr0(env, 0x00010033);
+    env->a20_mask = ~0x0;
+    env->smbase = 0x30000;
+    env->msr_smi_count = 0;
+
+    /* The GDT is hard-coded to the one set up by OVMF */
+    env->gdt.base = 0x823600;
+    env->gdt.limit = 0x0047;
+    env->ldt.limit = 0xffff;
+    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
+    env->tr.limit = 0xffff;
+    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
+
+    cpu_x86_load_seg_cache(env, R_CS, 0x38, 0, 0xffffffff,
+                           DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_DS, 0x30, 0, 0xffffffff,
+                           DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+                           DESC_W_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_ES, 0x30, 0, 0xffffffff,
+                           DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+                           DESC_W_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_SS, 0x30, 0, 0xffffffff,
+                           DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+                           DESC_W_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_FS, 0x30, 0, 0xffffffff,
+                           DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+                           DESC_W_MASK | DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_GS, 0x30, 0, 0xffffffff,
+                           DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+                           DESC_W_MASK | DESC_A_MASK);
+
+    /* The EIP is hard-coded to the OVMF migration handler entry point */
+    env->eip = 0x823000;
+    /* env->regs[R_EDX] = env->cpuid_version; */
+
+    env->eflags = 0x2;
+
+    /* FPU init */
+    for (i = 0; i < 8; i++) {
+        env->fptags[i] = 1;
+    }
+    cpu_set_fpuc(env, 0x37f);
+
+    env->mxcsr = 0x1f80;
+    /* All units are in INIT state.  */
+    env->xstate_bv = 0;
+
+    env->pat = 0x0007040600070406ULL;
+    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
+    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
+        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
+    }
+
+    memset(env->dr, 0, sizeof(env->dr));
+    env->dr[6] = DR6_FIXED_1;
+    env->dr[7] = DR7_FIXED_1;
+    cpu_breakpoint_remove_all(s, BP_CPU);
+    cpu_watchpoint_remove_all(s, BP_CPU);
+
+    xcr0 = XSTATE_FP_MASK;
+    env->xcr0 = xcr0;
+    cpu_x86_update_cr4(env, 0x00000668);
+
+    /*
+     * SDM 11.11.5 requires:
+     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
+     *  - IA32_MTRR_PHYSMASKn.V = 0
+     * All other bits are undefined.  For simplification, zero it all.
+     */
+    env->mtrr_deftype = 0;
+    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
+    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
+
+    env->interrupt_injected = -1;
+    env->exception_nr = -1;
+    env->exception_pending = 0;
+    env->exception_injected = 0;
+    env->exception_has_payload = false;
+    env->exception_payload = 0;
+    env->nmi_injected = false;
+#if !defined(CONFIG_USER_ONLY)
+    /* We hard-wire the BSP to the first CPU. */
+    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
+
+    s->halted = !cpu_is_bsp(cpu);
+
+    if (kvm_enabled()) {
+        kvm_arch_reset_vcpu(cpu);
+    }
+#endif
+}
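
For context only (this caller is not part of the patch), a rough sketch of
how the outgoing-migration path might drive the new helper once the mirror
vcpu's CPUState is at hand; start_migration_handler() and the surrounding
flow are hypothetical, while cpu_synchronize_post_reset() and cpu_resume()
are existing QEMU APIs:

    #include "qemu/osdep.h"
    #include "hw/core/cpu.h"        /* cpu_resume() */
    #include "sysemu/hw_accel.h"    /* cpu_synchronize_post_reset() */
    #include "confidential-ram.h"

    /*
     * Hypothetical caller, not from this series: put the mirror vcpu into
     * the migration handler entry state and let it run.
     */
    static void start_migration_handler(CPUState *mirror_cpu)
    {
        /* Set registers, segments and EIP to the MH entry point. */
        cgs_mh_reset_mirror_vcpu(mirror_cpu);

        /* Push the freshly written register state into KVM. */
        cpu_synchronize_post_reset(mirror_cpu);

        /* Let the vcpu thread run; it starts executing at env->eip. */
        cpu_resume(mirror_cpu);
    }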