@@ -150,6 +150,8 @@ static void kvm_reset_vcpu(void *opaque)
{
CPUState *env = opaque;
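+ /* Clear arch-specific volatile state (x86: pending NMIs) before the registers are pushed back to the kernel. */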
+ kvm_arch_reset_vcpu(env);
if (kvm_arch_put_registers(env)) {
fprintf(stderr, "Fatal: kvm vcpu reset failed\n");
abort();
@@ -201,6 +202,7 @@ int kvm_init_vcpu(CPUState *env)
ret = kvm_arch_init_vcpu(env);
if (ret == 0) {
qemu_register_reset(kvm_reset_vcpu, env);
+ kvm_arch_reset_vcpu(env);
ret = kvm_arch_put_registers(env);
}
err:
@@ -93,6 +93,8 @@ int kvm_arch_init(KVMState *s, int smp_cpus);
int kvm_arch_init_vcpu(CPUState *env);
+void kvm_arch_reset_vcpu(CPUState *env);
+
struct kvm_guest_debug;
struct kvm_debug_exit_arch;
@@ -693,6 +693,7 @@ typedef struct CPUX86State {
/* For KVM */
uint64_t interrupt_bitmap[256 / 64];
uint32_t mp_state;
+ uint32_t nmi_pending;
/* in order to simplify APIC support, we leave this pointer to the
user */
@@ -221,6 +221,12 @@ int kvm_arch_init_vcpu(CPUState *env)
return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
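+/* Called on vcpu creation and, via qemu_register_reset(), on every system reset, just before the registers are written to the kernel. */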
+void kvm_arch_reset_vcpu(CPUState *env)
+{
+ env->nmi_pending = 0;
+}
+
static int kvm_has_msr_star(CPUState *env)
{
static int has_msr_star;
@@ -346,113 +351,94 @@ static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
*qemu_reg = *kvm_reg;
}
-static int kvm_getput_regs(CPUState *env, int set)
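+/* The ioctls move to the callers; this is now a pure marshalling helper that copies between env and *regs, direction selected by 'set'. */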
+static void kvm_getput_regs(CPUState *env, struct kvm_regs *regs, int set)
{
- struct kvm_regs regs;
- int ret = 0;
-
- if (!set) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
- if (ret < 0)
- return ret;
- }
-
- kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
- kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
- kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
- kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
- kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
- kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
- kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
- kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
+ kvm_getput_reg(&regs->rax, &env->regs[R_EAX], set);
+ kvm_getput_reg(&regs->rbx, &env->regs[R_EBX], set);
+ kvm_getput_reg(&regs->rcx, &env->regs[R_ECX], set);
+ kvm_getput_reg(&regs->rdx, &env->regs[R_EDX], set);
+ kvm_getput_reg(&regs->rsi, &env->regs[R_ESI], set);
+ kvm_getput_reg(&regs->rdi, &env->regs[R_EDI], set);
+ kvm_getput_reg(&regs->rsp, &env->regs[R_ESP], set);
+ kvm_getput_reg(&regs->rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
- kvm_getput_reg(&regs.r8, &env->regs[8], set);
- kvm_getput_reg(&regs.r9, &env->regs[9], set);
- kvm_getput_reg(&regs.r10, &env->regs[10], set);
- kvm_getput_reg(&regs.r11, &env->regs[11], set);
- kvm_getput_reg(&regs.r12, &env->regs[12], set);
- kvm_getput_reg(&regs.r13, &env->regs[13], set);
- kvm_getput_reg(&regs.r14, &env->regs[14], set);
- kvm_getput_reg(&regs.r15, &env->regs[15], set);
+ kvm_getput_reg(&regs->r8, &env->regs[8], set);
+ kvm_getput_reg(&regs->r9, &env->regs[9], set);
+ kvm_getput_reg(&regs->r10, &env->regs[10], set);
+ kvm_getput_reg(&regs->r11, &env->regs[11], set);
+ kvm_getput_reg(&regs->r12, &env->regs[12], set);
+ kvm_getput_reg(&regs->r13, &env->regs[13], set);
+ kvm_getput_reg(&regs->r14, &env->regs[14], set);
+ kvm_getput_reg(&regs->r15, &env->regs[15], set);
#endif
- kvm_getput_reg(&regs.rflags, &env->eflags, set);
- kvm_getput_reg(&regs.rip, &env->eip, set);
-
- if (set)
- ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
-
- return ret;
+ kvm_getput_reg(&regs->rflags, &env->eflags, set);
+ kvm_getput_reg(&regs->rip, &env->eip, set);
}
-static int kvm_put_fpu(CPUState *env)
+static void kvm_put_fpu(CPUState *env, struct kvm_fpu *fpu)
{
- struct kvm_fpu fpu;
int i;
- memset(&fpu, 0, sizeof fpu);
- fpu.fsw = env->fpus & ~(7 << 11);
- fpu.fsw |= (env->fpstt & 7) << 11;
- fpu.fcw = env->fpuc;
- for (i = 0; i < 8; ++i)
- fpu.ftwx |= (!env->fptags[i]) << i;
- memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
- memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
- fpu.mxcsr = env->mxcsr;
-
- return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
+ memset(fpu, 0, sizeof(*fpu));
+ fpu->fsw = env->fpus & ~(7 << 11);
+ fpu->fsw |= (env->fpstt & 7) << 11;
+ fpu->fcw = env->fpuc;
+ for (i = 0; i < 8; ++i) {
+ fpu->ftwx |= (!env->fptags[i]) << i;
+ }
+ memcpy(fpu->fpr, env->fpregs, sizeof env->fpregs);
+ memcpy(fpu->xmm, env->xmm_regs, sizeof env->xmm_regs);
+ fpu->mxcsr = env->mxcsr;
}
-static int kvm_put_sregs(CPUState *env)
+static void kvm_put_sregs(CPUState *env, struct kvm_sregs *sregs)
{
- struct kvm_sregs sregs;
-
- memcpy(sregs.interrupt_bitmap,
+ memcpy(sregs->interrupt_bitmap,
env->interrupt_bitmap,
- sizeof(sregs.interrupt_bitmap));
+ sizeof(sregs->interrupt_bitmap));
if ((env->eflags & VM_MASK)) {
- set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
- set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
- set_v8086_seg(&sregs.es, &env->segs[R_ES]);
- set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
- set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
- set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
+ set_v8086_seg(&sregs->cs, &env->segs[R_CS]);
+ set_v8086_seg(&sregs->ds, &env->segs[R_DS]);
+ set_v8086_seg(&sregs->es, &env->segs[R_ES]);
+ set_v8086_seg(&sregs->fs, &env->segs[R_FS]);
+ set_v8086_seg(&sregs->gs, &env->segs[R_GS]);
+ set_v8086_seg(&sregs->ss, &env->segs[R_SS]);
} else {
- set_seg(&sregs.cs, &env->segs[R_CS]);
- set_seg(&sregs.ds, &env->segs[R_DS]);
- set_seg(&sregs.es, &env->segs[R_ES]);
- set_seg(&sregs.fs, &env->segs[R_FS]);
- set_seg(&sregs.gs, &env->segs[R_GS]);
- set_seg(&sregs.ss, &env->segs[R_SS]);
-
- if (env->cr[0] & CR0_PE_MASK) {
- /* force ss cpl to cs cpl */
- sregs.ss.selector = (sregs.ss.selector & ~3) |
- (sregs.cs.selector & 3);
- sregs.ss.dpl = sregs.ss.selector & 3;
- }
+ set_seg(&sregs->cs, &env->segs[R_CS]);
+ set_seg(&sregs->ds, &env->segs[R_DS]);
+ set_seg(&sregs->es, &env->segs[R_ES]);
+ set_seg(&sregs->fs, &env->segs[R_FS]);
+ set_seg(&sregs->gs, &env->segs[R_GS]);
+ set_seg(&sregs->ss, &env->segs[R_SS]);
+
+ if (env->cr[0] & CR0_PE_MASK) {
+ /* force ss cpl to cs cpl */
+ sregs->ss.selector = (sregs->ss.selector & ~3) |
+ (sregs->cs.selector & 3);
+ sregs->ss.dpl = sregs->ss.selector & 3;
+ }
}
- set_seg(&sregs.tr, &env->tr);
- set_seg(&sregs.ldt, &env->ldt);
+ set_seg(&sregs->tr, &env->tr);
+ set_seg(&sregs->ldt, &env->ldt);
- sregs.idt.limit = env->idt.limit;
- sregs.idt.base = env->idt.base;
- sregs.gdt.limit = env->gdt.limit;
- sregs.gdt.base = env->gdt.base;
+ sregs->idt.limit = env->idt.limit;
+ sregs->idt.base = env->idt.base;
+ sregs->gdt.limit = env->gdt.limit;
+ sregs->gdt.base = env->gdt.base;
- sregs.cr0 = env->cr[0];
- sregs.cr2 = env->cr[2];
- sregs.cr3 = env->cr[3];
- sregs.cr4 = env->cr[4];
+ sregs->cr0 = env->cr[0];
+ sregs->cr2 = env->cr[2];
+ sregs->cr3 = env->cr[3];
+ sregs->cr4 = env->cr[4];
- sregs.cr8 = cpu_get_apic_tpr(env);
- sregs.apic_base = cpu_get_apic_base(env);
+ sregs->cr8 = cpu_get_apic_tpr(env);
+ sregs->apic_base = cpu_get_apic_base(env);
- sregs.efer = env->efer;
-
- return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+ sregs->efer = env->efer;
}
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
@@ -462,93 +447,75 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
entry->data = value;
}
-static int kvm_put_msrs(CPUState *env)
+static void kvm_put_msrs(CPUState *env, struct kvm_msrs *msrs)
{
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[100];
- } msr_data;
- struct kvm_msr_entry *msrs = msr_data.entries;
+ struct kvm_msr_entry *msr = msrs->entries;
int n = 0;
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
- if (kvm_has_msr_star(env))
- kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
+ kvm_msr_entry_set(&msr[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+ kvm_msr_entry_set(&msr[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+ kvm_msr_entry_set(&msr[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+ if (kvm_has_msr_star(env)) {
+ kvm_msr_entry_set(&msr[n++], MSR_STAR, env->star);
+ }
+ kvm_msr_entry_set(&msr[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
/* FIXME if lm capable */
- kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
- kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
- kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
- kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
+ kvm_msr_entry_set(&msr[n++], MSR_CSTAR, env->cstar);
+ kvm_msr_entry_set(&msr[n++], MSR_KERNELGSBASE, env->kernelgsbase);
+ kvm_msr_entry_set(&msr[n++], MSR_FMASK, env->fmask);
+ kvm_msr_entry_set(&msr[n++], MSR_LSTAR, env->lstar);
#endif
- msr_data.info.nmsrs = n;
-
- return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
+ msrs->nmsrs = n;
}
-
-static int kvm_get_fpu(CPUState *env)
+static void kvm_get_fpu(CPUState *env, struct kvm_fpu *fpu)
{
- struct kvm_fpu fpu;
- int i, ret;
-
- ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
- if (ret < 0)
- return ret;
-
- env->fpstt = (fpu.fsw >> 11) & 7;
- env->fpus = fpu.fsw;
- env->fpuc = fpu.fcw;
- for (i = 0; i < 8; ++i)
- env->fptags[i] = !((fpu.ftwx >> i) & 1);
- memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
- memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
- env->mxcsr = fpu.mxcsr;
+ int i;
- return 0;
+ env->fpstt = (fpu->fsw >> 11) & 7;
+ env->fpus = fpu->fsw;
+ env->fpuc = fpu->fcw;
+ for (i = 0; i < 8; ++i) {
+ env->fptags[i] = !((fpu->ftwx >> i) & 1);
+ }
+ memcpy(env->fpregs, fpu->fpr, sizeof env->fpregs);
+ memcpy(env->xmm_regs, fpu->xmm, sizeof env->xmm_regs);
+ env->mxcsr = fpu->mxcsr;
}
-static int kvm_get_sregs(CPUState *env)
+static void kvm_get_sregs(CPUState *env, struct kvm_sregs *sregs)
{
- struct kvm_sregs sregs;
uint32_t hflags;
- int ret;
-
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
- if (ret < 0)
- return ret;
memcpy(env->interrupt_bitmap,
- sregs.interrupt_bitmap,
- sizeof(sregs.interrupt_bitmap));
+ sregs->interrupt_bitmap,
+ sizeof(sregs->interrupt_bitmap));
- get_seg(&env->segs[R_CS], &sregs.cs);
- get_seg(&env->segs[R_DS], &sregs.ds);
- get_seg(&env->segs[R_ES], &sregs.es);
- get_seg(&env->segs[R_FS], &sregs.fs);
- get_seg(&env->segs[R_GS], &sregs.gs);
- get_seg(&env->segs[R_SS], &sregs.ss);
+ get_seg(&env->segs[R_CS], &sregs->cs);
+ get_seg(&env->segs[R_DS], &sregs->ds);
+ get_seg(&env->segs[R_ES], &sregs->es);
+ get_seg(&env->segs[R_FS], &sregs->fs);
+ get_seg(&env->segs[R_GS], &sregs->gs);
+ get_seg(&env->segs[R_SS], &sregs->ss);
- get_seg(&env->tr, &sregs.tr);
- get_seg(&env->ldt, &sregs.ldt);
+ get_seg(&env->tr, &sregs->tr);
+ get_seg(&env->ldt, &sregs->ldt);
- env->idt.limit = sregs.idt.limit;
- env->idt.base = sregs.idt.base;
- env->gdt.limit = sregs.gdt.limit;
- env->gdt.base = sregs.gdt.base;
+ env->idt.limit = sregs->idt.limit;
+ env->idt.base = sregs->idt.base;
+ env->gdt.limit = sregs->gdt.limit;
+ env->gdt.base = sregs->gdt.base;
- env->cr[0] = sregs.cr0;
- env->cr[2] = sregs.cr2;
- env->cr[3] = sregs.cr3;
- env->cr[4] = sregs.cr4;
+ env->cr[0] = sregs->cr0;
+ env->cr[2] = sregs->cr2;
+ env->cr[3] = sregs->cr3;
+ env->cr[4] = sregs->cr4;
- cpu_set_apic_base(env, sregs.apic_base);
+ cpu_set_apic_base(env, sregs->apic_base);
- env->efer = sregs.efer;
+ env->efer = sregs->efer;
//cpu_set_apic_tpr(env, sregs.cr8);
#define HFLAG_COPY_MASK ~( \
@@ -557,8 +524,6 @@ static int kvm_get_sregs(CPUState *env)
HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
-
-
hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
@@ -590,125 +555,228 @@ static int kvm_get_sregs(CPUState *env)
}
}
env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
-
- return 0;
}
-static int kvm_get_msrs(CPUState *env)
+static void kvm_prepare_get_msrs(CPUState *env, struct kvm_msrs *msrs)
{
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[100];
- } msr_data;
- struct kvm_msr_entry *msrs = msr_data.entries;
- int ret, i, n;
+ struct kvm_msr_entry *msr = msrs->entries;
+ int n;
n = 0;
- msrs[n++].index = MSR_IA32_SYSENTER_CS;
- msrs[n++].index = MSR_IA32_SYSENTER_ESP;
- msrs[n++].index = MSR_IA32_SYSENTER_EIP;
- if (kvm_has_msr_star(env))
- msrs[n++].index = MSR_STAR;
- msrs[n++].index = MSR_IA32_TSC;
+ msr[n++].index = MSR_IA32_SYSENTER_CS;
+ msr[n++].index = MSR_IA32_SYSENTER_ESP;
+ msr[n++].index = MSR_IA32_SYSENTER_EIP;
+ if (kvm_has_msr_star(env)) {
+ msr[n++].index = MSR_STAR;
+ }
+ msr[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
/* FIXME lm_capable_kernel */
- msrs[n++].index = MSR_CSTAR;
- msrs[n++].index = MSR_KERNELGSBASE;
- msrs[n++].index = MSR_FMASK;
- msrs[n++].index = MSR_LSTAR;
+ msr[n++].index = MSR_CSTAR;
+ msr[n++].index = MSR_KERNELGSBASE;
+ msr[n++].index = MSR_FMASK;
+ msr[n++].index = MSR_LSTAR;
#endif
- msr_data.info.nmsrs = n;
- ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
- if (ret < 0)
- return ret;
+ msrs->nmsrs = n;
+}
- for (i = 0; i < ret; i++) {
- switch (msrs[i].index) {
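+/* Second half of the old kvm_get_msrs(): decode the values returned by the kernel; kvm_prepare_get_msrs() must have built the index list first. */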
+static void kvm_get_msrs(CPUState *env, struct kvm_msrs *msrs)
+{
+ struct kvm_msr_entry *msr = msrs->entries;
+ int i;
+
+ for (i = 0; i < msrs->nmsrs; i++) {
+ switch (msr[i].index) {
case MSR_IA32_SYSENTER_CS:
- env->sysenter_cs = msrs[i].data;
+ env->sysenter_cs = msr[i].data;
break;
case MSR_IA32_SYSENTER_ESP:
- env->sysenter_esp = msrs[i].data;
+ env->sysenter_esp = msr[i].data;
break;
case MSR_IA32_SYSENTER_EIP:
- env->sysenter_eip = msrs[i].data;
+ env->sysenter_eip = msr[i].data;
break;
case MSR_STAR:
- env->star = msrs[i].data;
+ env->star = msr[i].data;
break;
#ifdef TARGET_X86_64
case MSR_CSTAR:
- env->cstar = msrs[i].data;
+ env->cstar = msr[i].data;
break;
case MSR_KERNELGSBASE:
- env->kernelgsbase = msrs[i].data;
+ env->kernelgsbase = msr[i].data;
break;
case MSR_FMASK:
- env->fmask = msrs[i].data;
+ env->fmask = msr[i].data;
break;
case MSR_LSTAR:
- env->lstar = msrs[i].data;
+ env->lstar = msr[i].data;
break;
#endif
case MSR_IA32_TSC:
- env->tsc = msrs[i].data;
+ env->tsc = msr[i].data;
break;
}
}
+}
- return 0;
+#ifdef KVM_CAP_VCPU_STATE
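+/* Map between env->nmi_pending plus HF2_NMI_MASK in hflags2 and the kernel's kvm_nmi_state. */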
+static void kvm_put_nmi(CPUState *env, struct kvm_nmi_state *nmi_state)
+{
+ nmi_state->pending = env->nmi_pending;
+ nmi_state->masked = !!(env->hflags2 & HF2_NMI_MASK);
}
+static void kvm_get_nmi(CPUState *env, struct kvm_nmi_state *nmi_state)
+{
+ env->nmi_pending = nmi_state->pending;
+ if (nmi_state->masked) {
+ env->hflags2 |= HF2_NMI_MASK;
+ } else {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+}
+#endif
+
int kvm_arch_put_registers(CPUState *env)
{
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ struct kvm_fpu fpu;
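+ /* struct kvm_msrs ends in a flexible array, so pair the header with fixed in-line storage for up to 100 entries. */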
+ struct {
+ struct kvm_msrs info;
+ struct kvm_msr_entry entries[100];
+ } msrs;
int ret;
+#ifdef KVM_CAP_VCPU_STATE
+ struct kvm_mp_state mp_state;
+ struct kvm_nmi_state nmi;
+ struct {
+ struct kvm_vcpu_state header;
+ struct kvm_vcpu_substate substates[6];
+ } request;
+#endif
- ret = kvm_getput_regs(env, 1);
- if (ret < 0)
- return ret;
-
- ret = kvm_put_fpu(env);
- if (ret < 0)
- return ret;
-
- ret = kvm_put_sregs(env);
- if (ret < 0)
- return ret;
-
- ret = kvm_put_msrs(env);
- if (ret < 0)
- return ret;
-
- ret = kvm_put_mp_state(env);
+ kvm_getput_regs(env, &regs, 1);
+ kvm_put_fpu(env, &fpu);
+ kvm_put_sregs(env, &sregs);
+ kvm_put_msrs(env, &msrs.info);
+#ifdef KVM_CAP_VCPU_STATE
+ mp_state.mp_state = env->mp_state;
+ kvm_put_nmi(env, &nmi);
+
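+ /* Describe each buffer to the kernel; substate offsets are relative to &request, the argument handed to the ioctl. */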
+ request.header.nsubstates = 6;
+ request.header.substates[0].type = KVM_VCPU_REGS;
+ request.header.substates[0].offset = (size_t)&regs - (size_t)&request;
+ request.header.substates[1].type = KVM_VCPU_FPU;
+ request.header.substates[1].offset = (size_t)&fpu - (size_t)&request;
+ request.header.substates[2].type = KVM_VCPU_SREGS;
+ request.header.substates[2].offset = (size_t)&sregs - (size_t)&request;
+ request.header.substates[3].type = KVM_X86_VCPU_MSRS;
+ request.header.substates[3].offset = (size_t)&msrs - (size_t)&request;
+ request.header.substates[4].type = KVM_VCPU_MP;
+ request.header.substates[4].offset = (size_t)&mp_state - (size_t)&request;
+ request.header.substates[5].type = KVM_X86_VCPU_NMI;
+ request.header.substates[5].offset = (size_t)&nmi - (size_t)&request;
+
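+ /* Try the combined state write first; the braced block below is the if-body on failure and, without KVM_CAP_VCPU_STATE, runs unconditionally as the legacy fallback. */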
+ ret = kvm_vcpu_ioctl(env, KVM_SET_VCPU_STATE, &request);
if (ret < 0)
- return ret;
+#endif
+ {
+ ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+ if (ret < 0)
+ return ret;
+ ret = kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
+ if (ret < 0)
+ return ret;
+ ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+ if (ret < 0)
+ return ret;
+ ret = kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msrs);
+ if (ret < 0)
+ return ret;
+ ret = kvm_put_mp_state(env);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
int kvm_arch_get_registers(CPUState *env)
{
- int ret;
-
- ret = kvm_getput_regs(env, 0);
- if (ret < 0)
- return ret;
-
- ret = kvm_get_fpu(env);
- if (ret < 0)
- return ret;
-
- ret = kvm_get_sregs(env);
- if (ret < 0)
- return ret;
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ struct kvm_fpu fpu;
+ struct {
+ struct kvm_msrs info;
+ struct kvm_msr_entry entries[100];
+ } msrs;
+ int ret = -1;
+#ifdef KVM_CAP_VCPU_STATE
+ struct kvm_mp_state mp_state;
+ struct kvm_nmi_state nmi;
+ struct {
+ struct kvm_vcpu_state header;
+ struct kvm_vcpu_substate substates[6];
+ } request;
+#endif
- ret = kvm_get_msrs(env);
- if (ret < 0)
- return ret;
+ kvm_prepare_get_msrs(env, &msrs.info);
+
+#ifdef KVM_CAP_VCPU_STATE
+ request.header.nsubstates = 6;
+ request.header.substates[0].type = KVM_VCPU_REGS;
+ request.header.substates[0].offset = (size_t)&regs - (size_t)&request;
+ request.header.substates[1].type = KVM_VCPU_FPU;
+ request.header.substates[1].offset = (size_t)&fpu - (size_t)&request;
+ request.header.substates[2].type = KVM_VCPU_SREGS;
+ request.header.substates[2].offset = (size_t)&sregs - (size_t)&request;
+ request.header.substates[3].type = KVM_X86_VCPU_MSRS;
+ request.header.substates[3].offset = (size_t)&msrs - (size_t)&request;
+ request.header.substates[4].type = KVM_VCPU_MP;
+ request.header.substates[4].offset = (size_t)&mp_state - (size_t)&request;
+ request.header.substates[5].type = KVM_X86_VCPU_NMI;
+ request.header.substates[5].offset = (size_t)&nmi - (size_t)&request;
+
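+ /* Attempt the combined read; on success the substates are decoded below, otherwise (or without KVM_CAP_VCPU_STATE) fall back to the individual get ioctls. */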
+ ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_STATE, &request);
+
+ if (ret == 0) {
+ msrs.info.nmsrs = msrs.info.nprocessed;
+ env->mp_state = mp_state.mp_state;
+ kvm_get_nmi(env, &nmi);
+ } else
+#endif
+ {
+ ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+ if (ret < 0)
+ return ret;
+ ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
+ if (ret < 0)
+ return ret;
+ ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ if (ret < 0)
+ return ret;
+ ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msrs);
+ if (ret < 0)
+ return ret;
+ msrs.info.nmsrs = ret;
+ ret = kvm_get_mp_state(env);
+ if (ret < 0)
+ return ret;
+ }
- ret = kvm_get_mp_state(env);
- if (ret < 0)
- return ret;
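+ /* Both paths have filled regs, fpu, sregs and msrs by now; decode them into env. */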
+ kvm_getput_regs(env, &regs, 0);
+ kvm_get_fpu(env, &fpu);
+ kvm_get_sregs(env, &sregs);
+ kvm_get_msrs(env, &msrs.info);
return 0;
}
@@ -468,6 +468,7 @@ const VMStateDescription vmstate_cpu = {
VMSTATE_INT32_V(pending_irq_vmstate, CPUState, 9),
VMSTATE_UINT32_V(mp_state, CPUState, 9),
VMSTATE_UINT64_V(tsc, CPUState, 9),
+ VMSTATE_UINT32_V(nmi_pending, CPUState, 11),
/* MCE */
VMSTATE_UINT64_V(mcg_cap, CPUState, 10),
VMSTATE_UINT64_V(mcg_status, CPUState, 10),
@@ -53,6 +53,11 @@ int kvm_arch_init_vcpu(CPUState *cenv)
return ret;
}
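+/* No KVM-side vcpu state needs resetting on ppc yet; the stub lets common code call the hook unconditionally. */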
+void kvm_arch_reset_vcpu(CPUState *env)
+{
+}
+
int kvm_arch_put_registers(CPUState *env)
{
struct kvm_regs regs;