From patchwork Wed Oct 14 16:29:11 2009
X-Patchwork-Submitter: Glauber Costa
X-Patchwork-Id: 53746
From: Glauber Costa
To: kvm@vger.kernel.org
Cc: avi@redhat.com
Subject: [PATCH 1/3] change function signatures so that they don't take a vcpu argument
Date: Wed, 14 Oct 2009 13:29:11 -0300
Message-Id: <1255537753-18694-2-git-send-email-glommer@redhat.com>
In-Reply-To: <1255537753-18694-1-git-send-email-glommer@redhat.com>
References: <1255537753-18694-1-git-send-email-glommer@redhat.com>

diff --git a/cpu-defs.h b/cpu-defs.h
index 1f48267..cf502e9 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -141,7 +141,6 @@ struct qemu_work_item;
 struct KVMCPUState {
     pthread_t thread;
     int signalled;
-    void *vcpu_ctx;
     struct qemu_work_item *queued_work_first, *queued_work_last;
     int regs_modified;
 };
diff --git a/hw/apic.c b/hw/apic.c
index b8fe529..9e707bd 100644
--- a/hw/apic.c
+++ b/hw/apic.c
@@ -900,7 +900,7 @@ static void kvm_kernel_lapic_save_to_user(APICState *s)
     struct kvm_lapic_state *kapic = &apic;
     int i, v;
 
-    kvm_get_lapic(s->cpu_env->kvm_cpu_state.vcpu_ctx, kapic);
+    kvm_get_lapic(s->cpu_env, kapic);
 
     s->id = kapic_reg(kapic, 0x2) >> 24;
     s->tpr = kapic_reg(kapic, 0x8);
@@ -953,7 +953,7 @@ static void kvm_kernel_lapic_load_from_user(APICState *s)
     kapic_set_reg(klapic, 0x38, s->initial_count);
     kapic_set_reg(klapic, 0x3e, s->divide_conf);
 
-    kvm_set_lapic(s->cpu_env->kvm_cpu_state.vcpu_ctx, klapic);
+    kvm_set_lapic(s->cpu_env, klapic);
 }
 
 #endif
diff --git a/kvm-tpr-opt.c b/kvm-tpr-opt.c
index f7b6f3b..932b49b 100644
--- a/kvm-tpr-opt.c
+++ b/kvm-tpr-opt.c
@@ -70,7 +70,7 @@ static uint8_t read_byte_virt(CPUState *env, target_ulong virt)
 {
     struct kvm_sregs sregs;
 
-    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
+    kvm_get_sregs(env, &sregs);
     return ldub_phys(map_addr(&sregs, virt, NULL));
 }
 
@@ -78,7 +78,7 @@ static void write_byte_virt(CPUState *env, target_ulong virt, uint8_t b)
 {
     struct kvm_sregs sregs;
 
-    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
+    kvm_get_sregs(env, &sregs);
     stb_phys(map_addr(&sregs, virt, NULL), b);
 }
 
@@ -86,7 +86,7 @@ static __u64 kvm_rsp_read(CPUState *env)
 {
     struct kvm_regs regs;
 
-    kvm_get_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
+    kvm_get_regs(env, &regs);
     return regs.rsp;
 }
 
@@ -192,7 +192,7 @@ static int bios_is_mapped(CPUState *env, uint64_t rip)
     if (bios_enabled)
         return 1;
 
-    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
+    kvm_get_sregs(env, &sregs);
 
     probe = (rip & 0xf0000000) + 0xe0000;
     phys = map_addr(&sregs, probe, &perms);
@@ -240,7 +240,7 @@ static int enable_vapic(CPUState *env)
     if (pcr_cpu < 0)
         return 0;
 
-    kvm_enable_vapic(env->kvm_cpu_state.vcpu_ctx, vapic_phys + (pcr_cpu << 7));
+    kvm_enable_vapic(env, vapic_phys + (pcr_cpu << 7));
     cpu_physical_memory_rw(vapic_phys + (pcr_cpu << 7) + 4, &one, 1, 1);
     bios_enabled = 1;
 
@@ -313,7 +313,7 @@ void kvm_tpr_access_report(CPUState *env, uint64_t rip, int is_write)
 
 void kvm_tpr_vcpu_start(CPUState *env)
 {
-    kvm_enable_tpr_access_reporting(env->kvm_cpu_state.vcpu_ctx);
+    kvm_enable_tpr_access_reporting(env);
     if (bios_enabled)
         enable_vapic(env);
 }
@@ -363,7 +363,7 @@ static void vtpr_ioport_write(void *opaque, uint32_t addr, uint32_t val)
     struct kvm_sregs sregs;
     uint32_t rip;
 
-    kvm_get_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
+    kvm_get_regs(env, &regs);
     rip = regs.rip - 2;
     write_byte_virt(env, rip, 0x66);
     write_byte_virt(env, rip + 1, 0x90);
@@ -371,7 +371,7 @@ static void vtpr_ioport_write(void *opaque, uint32_t addr, uint32_t val)
         return;
     if (!bios_is_mapped(env, rip))
         printf("bios not mapped?\n");
-    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
+    kvm_get_sregs(env, &sregs);
     for (addr = 0xfffff000u; addr >= 0x80000000u; addr -= 4096)
         if (map_addr(&sregs, addr, NULL) == 0xfee00000u) {
             real_tpr = addr + 0x80;
diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
index fffcfd8..8c4140d 100644
--- a/qemu-kvm-x86.c
+++ b/qemu-kvm-x86.c
@@ -172,14 +172,14 @@ static int kvm_handle_tpr_access(CPUState *env)
 }
 
 
-int kvm_enable_vapic(kvm_vcpu_context_t vcpu, uint64_t vapic)
+int kvm_enable_vapic(CPUState *env, uint64_t vapic)
 {
     int r;
     struct kvm_vapic_addr va = {
         .vapic_addr = vapic,
     };
 
-    r = ioctl(vcpu->fd, KVM_SET_VAPIC_ADDR, &va);
+    r = ioctl(env->kvm_fd, KVM_SET_VAPIC_ADDR, &va);
     if (r == -1) {
         r = -errno;
         perror("kvm_enable_vapic");
@@ -281,12 +281,12 @@ int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
 
 #ifdef KVM_CAP_IRQCHIP
 
-int kvm_get_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s)
+int kvm_get_lapic(CPUState *env, struct kvm_lapic_state *s)
 {
     int r;
     if (!kvm_irqchip_in_kernel())
         return 0;
-    r = ioctl(vcpu->fd, KVM_GET_LAPIC, s);
+    r = ioctl(env->kvm_fd, KVM_GET_LAPIC, s);
     if (r == -1) {
         r = -errno;
         perror("kvm_get_lapic");
@@ -294,12 +294,12 @@ int kvm_get_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s)
     return r;
 }
 
-int kvm_set_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s)
+int kvm_set_lapic(CPUState *env, struct kvm_lapic_state *s)
 {
     int r;
     if (!kvm_irqchip_in_kernel())
         return 0;
-    r = ioctl(vcpu->fd, KVM_SET_LAPIC, s);
+    r = ioctl(env->kvm_fd, KVM_SET_LAPIC, s);
     if (r == -1) {
         r = -errno;
         perror("kvm_set_lapic");
@@ -353,10 +353,10 @@ int kvm_has_pit_state2(kvm_context_t kvm)
     return r;
 }
 
-void kvm_show_code(kvm_vcpu_context_t vcpu)
+void kvm_show_code(CPUState *env)
 {
 #define SHOW_CODE_LEN 50
-    int fd = vcpu->fd;
+    int fd = env->kvm_fd;
     struct kvm_regs regs;
     struct kvm_sregs sregs;
     int r, n;
@@ -417,14 +417,14 @@ struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
     return msrs;
 }
 
-int kvm_get_msrs(kvm_vcpu_context_t vcpu, struct kvm_msr_entry *msrs, int n)
+int kvm_get_msrs(CPUState *env, struct kvm_msr_entry *msrs, int n)
 {
     struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
     int r, e;
 
     kmsrs->nmsrs = n;
     memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
-    r = ioctl(vcpu->fd, KVM_GET_MSRS, kmsrs);
+    r = ioctl(env->kvm_fd, KVM_GET_MSRS, kmsrs);
     e = errno;
     memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
     free(kmsrs);
@@ -432,14 +432,14 @@ int kvm_get_msrs(kvm_vcpu_context_t vcpu, struct kvm_msr_entry *msrs, int n)
     return r;
 }
 
-int kvm_set_msrs(kvm_vcpu_context_t vcpu, struct kvm_msr_entry *msrs, int n)
+int kvm_set_msrs(CPUState *env, struct kvm_msr_entry *msrs, int n)
 {
     struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
     int r, e;
 
     kmsrs->nmsrs = n;
     memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
-    r = ioctl(vcpu->fd, KVM_SET_MSRS, kmsrs);
+    r = ioctl(env->kvm_fd, KVM_SET_MSRS, kmsrs);
     e = errno;
     free(kmsrs);
     errno = e;
@@ -461,19 +461,19 @@ int kvm_get_mce_cap_supported(kvm_context_t kvm, uint64_t *mce_cap,
     return -ENOSYS;
 }
 
-int kvm_setup_mce(kvm_vcpu_context_t vcpu, uint64_t *mcg_cap)
+int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
 {
 #ifdef KVM_CAP_MCE
-    return ioctl(vcpu->fd, KVM_X86_SETUP_MCE, mcg_cap);
+    return ioctl(env->kvm_fd, KVM_X86_SETUP_MCE, mcg_cap);
 #else
     return -ENOSYS;
 #endif
 }
 
-int kvm_set_mce(kvm_vcpu_context_t vcpu, struct kvm_x86_mce *m)
+int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
 {
 #ifdef KVM_CAP_MCE
-    return ioctl(vcpu->fd, KVM_X86_SET_MCE, m);
+    return ioctl(env->kvm_fd, KVM_X86_SET_MCE, m);
 #else
     return -ENOSYS;
 #endif
@@ -494,9 +494,9 @@ static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
     fprintf(stderr, "%s %llx/%x\n", name, dt->base, dt->limit);
 }
 
-void kvm_show_regs(kvm_vcpu_context_t vcpu)
+void kvm_show_regs(CPUState *env)
 {
-    int fd = vcpu->fd;
+    int fd = env->kvm_fd;
     struct kvm_regs regs;
     struct kvm_sregs sregs;
     int r;
@@ -553,7 +553,7 @@ static __u64 kvm_get_cr8(CPUState *env)
     return env->kvm_run->cr8;
 }
 
-int kvm_setup_cpuid(kvm_vcpu_context_t vcpu, int nent,
+int kvm_setup_cpuid(CPUState *env, int nent,
                     struct kvm_cpuid_entry *entries)
 {
     struct kvm_cpuid *cpuid;
@@ -563,13 +563,13 @@ int kvm_setup_cpuid(kvm_vcpu_context_t vcpu, int nent,
 
     cpuid->nent = nent;
     memcpy(cpuid->entries, entries, nent * sizeof(*entries));
-    r = ioctl(vcpu->fd, KVM_SET_CPUID, cpuid);
+    r = ioctl(env->kvm_fd, KVM_SET_CPUID, cpuid);
     free(cpuid);
 
     return r;
 }
 
-int kvm_setup_cpuid2(kvm_vcpu_context_t vcpu, int nent,
+int kvm_setup_cpuid2(CPUState *env, int nent,
                      struct kvm_cpuid_entry2 *entries)
 {
     struct kvm_cpuid2 *cpuid;
@@ -579,7 +579,7 @@ int kvm_setup_cpuid2(kvm_vcpu_context_t vcpu, int nent,
     cpuid->nent = nent;
     memcpy(cpuid->entries, entries, nent * sizeof(*entries));
 
-    r = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
+    r = ioctl(env->kvm_fd, KVM_SET_CPUID2, cpuid);
     if (r == -1) {
         fprintf(stderr, "kvm_setup_cpuid2: %m\n");
         r = -errno;
@@ -624,7 +624,7 @@ int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
 
 #ifdef KVM_CAP_VAPIC
 
-static int tpr_access_reporting(kvm_vcpu_context_t vcpu, int enabled)
+static int tpr_access_reporting(CPUState *env, int enabled)
 {
     int r;
     struct kvm_tpr_access_ctl tac = {
@@ -634,7 +634,7 @@ static int tpr_access_reporting(kvm_vcpu_context_t vcpu, int enabled)
     r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
     if (r <= 0)
         return -ENOSYS;
-    r = ioctl(vcpu->fd, KVM_TPR_ACCESS_REPORTING, &tac);
+    r = ioctl(env->kvm_fd, KVM_TPR_ACCESS_REPORTING, &tac);
     if (r == -1) {
         r = -errno;
         perror("KVM_TPR_ACCESS_REPORTING");
@@ -643,14 +643,14 @@ static int tpr_access_reporting(kvm_vcpu_context_t vcpu, int enabled)
     return 0;
 }
 
-int kvm_enable_tpr_access_reporting(kvm_vcpu_context_t vcpu)
+int kvm_enable_tpr_access_reporting(CPUState *env)
 {
-    return tpr_access_reporting(vcpu, 1);
+    return tpr_access_reporting(env, 1);
 }
 
-int kvm_disable_tpr_access_reporting(kvm_vcpu_context_t vcpu)
+int kvm_disable_tpr_access_reporting(CPUState *env)
 {
-    return tpr_access_reporting(vcpu, 0);
+    return tpr_access_reporting(env, 0);
 }
 
 #endif
@@ -926,7 +926,7 @@ void kvm_arch_load_regs(CPUState *env)
     regs.rflags = env->eflags;
     regs.rip = env->eip;
 
-    kvm_set_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
+    kvm_set_regs(env, &regs);
 
     memset(&fpu, 0, sizeof fpu);
     fpu.fsw = env->fpus & ~(7 << 11);
@@ -937,7 +937,7 @@ void kvm_arch_load_regs(CPUState *env)
     memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
     memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
     fpu.mxcsr = env->mxcsr;
-    kvm_set_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);
+    kvm_set_fpu(env, &fpu);
 
     memcpy(sregs.interrupt_bitmap, env->interrupt_bitmap,
            sizeof(sregs.interrupt_bitmap));
@@ -982,7 +982,7 @@ void kvm_arch_load_regs(CPUState *env)
 
     sregs.efer = env->efer;
 
-    kvm_set_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
+    kvm_set_sregs(env, &sregs);
 
     /* msrs */
     n = 0;
@@ -1002,7 +1002,7 @@ void kvm_arch_load_regs(CPUState *env)
     }
 #endif
 
-    rc = kvm_set_msrs(env->kvm_cpu_state.vcpu_ctx, msrs, n);
+    rc = kvm_set_msrs(env, msrs, n);
     if (rc == -1)
         perror("kvm_set_msrs FAILED");
 }
@@ -1014,7 +1014,7 @@ void kvm_load_tsc(CPUState *env)
 
     set_msr_entry(&msr, MSR_IA32_TSC, env->tsc);
 
-    rc = kvm_set_msrs(env->kvm_cpu_state.vcpu_ctx, &msr, 1);
+    rc = kvm_set_msrs(env, &msr, 1);
     if (rc == -1)
         perror("kvm_set_tsc FAILED.\n");
 }
@@ -1025,7 +1025,7 @@ void kvm_arch_save_mpstate(CPUState *env)
     int r;
     struct kvm_mp_state mp_state;
 
-    r = kvm_get_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
+    r = kvm_get_mpstate(env, &mp_state);
     if (r < 0)
         env->mp_state = -1;
     else
@@ -1045,7 +1045,7 @@ void kvm_arch_load_mpstate(CPUState *env)
      * so don't touch it.
      */
     if (env->mp_state != -1)
-        kvm_set_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
+        kvm_set_mpstate(env, &mp_state);
 #endif
 }
 
@@ -1058,7 +1058,7 @@ void kvm_arch_save_regs(CPUState *env)
     uint32_t hflags;
     uint32_t i, n, rc;
 
-    kvm_get_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
+    kvm_get_regs(env, &regs);
 
     env->regs[R_EAX] = regs.rax;
     env->regs[R_EBX] = regs.rbx;
@@ -1082,7 +1082,7 @@ void kvm_arch_save_regs(CPUState *env)
     env->eflags = regs.rflags;
     env->eip = regs.rip;
 
-    kvm_get_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);
+    kvm_get_fpu(env, &fpu);
     env->fpstt = (fpu.fsw >> 11) & 7;
     env->fpus = fpu.fsw;
     env->fpuc = fpu.fcw;
@@ -1092,7 +1092,7 @@ void kvm_arch_save_regs(CPUState *env)
     memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
     env->mxcsr = fpu.mxcsr;
 
-    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
+    kvm_get_sregs(env, &sregs);
 
     memcpy(env->interrupt_bitmap, sregs.interrupt_bitmap,
            sizeof(env->interrupt_bitmap));
@@ -1179,7 +1179,7 @@ void kvm_arch_save_regs(CPUState *env)
         msrs[n++].index = MSR_LSTAR;
     }
 #endif
-    rc = kvm_get_msrs(env->kvm_cpu_state.vcpu_ctx, msrs, n);
+    rc = kvm_get_msrs(env, msrs, n);
     if (rc == -1) {
         perror("kvm_get_msrs FAILED");
     }
@@ -1330,7 +1330,7 @@ int kvm_arch_init_vcpu(CPUState *cenv)
     for (i = 0x80000000; i <= limit; ++i)
         do_cpuid_ent(&cpuid_ent[cpuid_nent++], i, 0, &copy);
 
-    kvm_setup_cpuid2(cenv->kvm_cpu_state.vcpu_ctx, cpuid_nent, cpuid_ent);
+    kvm_setup_cpuid2(cenv, cpuid_nent, cpuid_ent);
 
 #ifdef KVM_CAP_MCE
     if (((cenv->cpuid_version >> 8)&0xF) >= 6
@@ -1346,7 +1346,7 @@ int kvm_arch_init_vcpu(CPUState *cenv)
             banks = MCE_BANKS_DEF;
         mcg_cap &= MCE_CAP_DEF;
         mcg_cap |= banks;
-        if (kvm_setup_mce(cenv->kvm_cpu_state.vcpu_ctx, &mcg_cap))
+        if (kvm_setup_mce(cenv, &mcg_cap))
             perror("kvm_setup_mce FAILED");
         else
             cenv->mcg_cap = mcg_cap;
@@ -1357,9 +1357,8 @@ int kvm_arch_init_vcpu(CPUState *cenv)
     return 0;
 }
 
-int kvm_arch_halt(kvm_vcpu_context_t vcpu)
+int kvm_arch_halt(CPUState *env)
 {
-    CPUState *env = cpu_single_env;
 
     if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
           (env->eflags & IF_MASK)) &&
@@ -1406,7 +1405,7 @@ int kvm_arch_try_push_interrupts(void *opaque)
         env->interrupt_request &= ~CPU_INTERRUPT_HARD;
         irq = cpu_get_pic_interrupt(env);
         if (irq >= 0) {
-            r = kvm_inject_irq(env->kvm_cpu_state.vcpu_ctx, irq);
+            r = kvm_inject_irq(env, irq);
             if (r < 0)
                 printf("cpu %d fail inject %x\n", env->cpu_index, irq);
         }
@@ -1425,7 +1424,7 @@ void kvm_arch_push_nmi(void *opaque)
         return;
 
     env->interrupt_request &= ~CPU_INTERRUPT_NMI;
-    r = kvm_inject_nmi(env->kvm_cpu_state.vcpu_ctx);
+    r = kvm_inject_nmi(env);
     if (r < 0)
         printf("cpu %d fail inject NMI\n", env->cpu_index);
 }
@@ -1437,7 +1436,7 @@ void kvm_arch_cpu_reset(CPUState *env)
     if (!cpu_is_bsp(env)) {
         if (kvm_irqchip_in_kernel()) {
 #ifdef KVM_CAP_MP_STATE
-            kvm_reset_mpstate(env->kvm_cpu_state.vcpu_ctx);
+            kvm_reset_mpstate(env);
 #endif
         } else {
             env->interrupt_request &= ~CPU_INTERRUPT_HARD;
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 15b9129..700d030 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -436,7 +436,7 @@ void kvm_disable_pit_creation(kvm_context_t kvm)
     kvm->no_pit_creation = 1;
 }
 
-kvm_vcpu_context_t kvm_create_vcpu(CPUState *env, int id)
+static void kvm_create_vcpu(CPUState *env, int id)
 {
     long mmap_size;
     int r;
@@ -447,6 +447,7 @@ kvm_vcpu_context_t kvm_create_vcpu(CPUState *env, int id)
         fprintf(stderr, "kvm_create_vcpu: %m\n");
         goto err;
     }
+    vcpu_ctx->fd = r;
 
     env->kvm_fd = r;
 
@@ -465,12 +466,11 @@ kvm_vcpu_context_t kvm_create_vcpu(CPUState *env, int id)
         goto err_fd;
     }
-    return vcpu_ctx;
+    return;
 
 err_fd:
     close(vcpu_ctx->fd);
 err:
     free(vcpu_ctx);
-    return NULL;
 }
 
 static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
@@ -819,54 +819,54 @@ static int handle_debug(CPUState *env)
 #endif
 }
 
-int kvm_get_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs)
+int kvm_get_regs(CPUState *env, struct kvm_regs *regs)
 {
-    return ioctl(vcpu->fd, KVM_GET_REGS, regs);
+    return ioctl(env->kvm_fd, KVM_GET_REGS, regs);
 }
 
-int kvm_set_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs)
+int kvm_set_regs(CPUState *env, struct kvm_regs *regs)
 {
-    return ioctl(vcpu->fd, KVM_SET_REGS, regs);
+    return ioctl(env->kvm_fd, KVM_SET_REGS, regs);
 }
 
-int kvm_get_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu)
+int kvm_get_fpu(CPUState *env, struct kvm_fpu *fpu)
 {
-    return ioctl(vcpu->fd, KVM_GET_FPU, fpu);
+    return ioctl(env->kvm_fd, KVM_GET_FPU, fpu);
 }
 
-int kvm_set_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu)
+int kvm_set_fpu(CPUState *env, struct kvm_fpu *fpu)
 {
-    return ioctl(vcpu->fd, KVM_SET_FPU, fpu);
+    return ioctl(env->kvm_fd, KVM_SET_FPU, fpu);
 }
 
-int kvm_get_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *sregs)
+int kvm_get_sregs(CPUState *env, struct kvm_sregs *sregs)
 {
-    return ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
+    return ioctl(env->kvm_fd, KVM_GET_SREGS, sregs);
 }
 
-int kvm_set_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *sregs)
+int kvm_set_sregs(CPUState *env, struct kvm_sregs *sregs)
 {
-    return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
+    return ioctl(env->kvm_fd, KVM_SET_SREGS, sregs);
 }
 
 #ifdef KVM_CAP_MP_STATE
-int kvm_get_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
+int kvm_get_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
 {
     int r;
 
     r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
     if (r > 0)
-        return ioctl(vcpu->fd, KVM_GET_MP_STATE, mp_state);
+        return ioctl(env->kvm_fd, KVM_GET_MP_STATE, mp_state);
     return -ENOSYS;
 }
 
-int kvm_set_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
+int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
 {
     int r;
 
     r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
     if (r > 0)
-        return ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
+        return ioctl(env->kvm_fd, KVM_SET_MP_STATE, mp_state);
     return -ENOSYS;
 }
 #endif
@@ -930,13 +930,12 @@ int kvm_is_ready_for_interrupt_injection(CPUState *env)
     return env->kvm_run->ready_for_interrupt_injection;
 }
 
-int kvm_run(kvm_vcpu_context_t vcpu, void *env)
+int kvm_run(CPUState *env)
 {
     int r;
-    int fd = vcpu->fd;
-    CPUState *_env = env;
-    kvm_context_t kvm = &_env->kvm_state->kvm_context;
-    struct kvm_run *run = _env->kvm_run;
+    kvm_context_t kvm = &env->kvm_state->kvm_context;
+    struct kvm_run *run = env->kvm_run;
+    int fd = env->kvm_fd;
 
 again:
     push_nmi(kvm);
@@ -945,9 +944,9 @@ int kvm_run(kvm_vcpu_context_t vcpu, void *env)
         run->request_interrupt_window = kvm_arch_try_push_interrupts(env);
 #endif
 
-    if (_env->kvm_cpu_state.regs_modified) {
-        kvm_arch_put_registers(_env);
-        _env->kvm_cpu_state.regs_modified = 0;
+    if (env->kvm_cpu_state.regs_modified) {
+        kvm_arch_put_registers(env);
+        env->kvm_cpu_state.regs_modified = 0;
     }
 
     r = pre_kvm_run(kvm, env);
@@ -995,8 +994,8 @@ int kvm_run(kvm_vcpu_context_t vcpu, void *env)
         case KVM_EXIT_EXCEPTION:
             fprintf(stderr, "exception %d (%x)\n", run->ex.exception,
                     run->ex.error_code);
-            kvm_show_regs(vcpu);
-            kvm_show_code(vcpu);
+            kvm_show_regs(env);
+            kvm_show_code(env);
             abort();
             break;
         case KVM_EXIT_IO:
@@ -1009,7 +1008,7 @@ int kvm_run(kvm_vcpu_context_t vcpu, void *env)
             r = handle_mmio(env);
             break;
         case KVM_EXIT_HLT:
-            r = kvm_arch_halt(vcpu);
+            r = kvm_arch_halt(env);
             break;
         case KVM_EXIT_IRQ_WINDOW_OPEN:
             break;
@@ -1018,16 +1017,16 @@ int kvm_run(kvm_vcpu_context_t vcpu, void *env)
             break;
 #if defined(__s390__)
         case KVM_EXIT_S390_SIEIC:
-            r = kvm_s390_handle_intercept(kvm, vcpu, run);
+            r = kvm_s390_handle_intercept(kvm, env, run);
             break;
         case KVM_EXIT_S390_RESET:
-            r = kvm_s390_handle_reset(kvm, vcpu, run);
+            r = kvm_s390_handle_reset(kvm, env, run);
             break;
 #endif
         case KVM_EXIT_INTERNAL_ERROR:
             fprintf(stderr, "KVM internal error. Suberror: %d\n",
                     run->internal.suberror);
-            kvm_show_regs(vcpu);
+            kvm_show_regs(env);
             if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION)
                 fprintf(stderr, "emulation failure, check dmesg for details\n");
             abort();
@@ -1035,7 +1034,7 @@ int kvm_run(kvm_vcpu_context_t vcpu, void *env)
         default:
             if (kvm_arch_run(env)) {
                 fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
-                kvm_show_regs(vcpu);
+                kvm_show_regs(env);
                 abort();
             }
             break;
@@ -1047,28 +1046,28 @@ int kvm_run(kvm_vcpu_context_t vcpu, void *env)
     return r;
 }
 
-int kvm_inject_irq(kvm_vcpu_context_t vcpu, unsigned irq)
+int kvm_inject_irq(CPUState *env, unsigned irq)
 {
     struct kvm_interrupt intr;
 
     intr.irq = irq;
-    return ioctl(vcpu->fd, KVM_INTERRUPT, &intr);
+    return ioctl(env->kvm_fd, KVM_INTERRUPT, &intr);
 }
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_set_guest_debug(kvm_vcpu_context_t vcpu, struct kvm_guest_debug *dbg)
+int kvm_set_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
 {
-    return ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, dbg);
+    return ioctl(env->kvm_fd, KVM_SET_GUEST_DEBUG, dbg);
 }
 #endif
 
-int kvm_set_signal_mask(kvm_vcpu_context_t vcpu, const sigset_t *sigset)
+int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
 {
     struct kvm_signal_mask *sigmask;
     int r;
 
     if (!sigset) {
-        r = ioctl(vcpu->fd, KVM_SET_SIGNAL_MASK, NULL);
+        r = ioctl(env->kvm_fd, KVM_SET_SIGNAL_MASK, NULL);
         if (r == -1)
             r = -errno;
         return r;
@@ -1077,7 +1076,7 @@ int kvm_set_signal_mask(kvm_vcpu_context_t vcpu, const sigset_t *sigset)
 
     sigmask->len = 8;
     memcpy(sigmask->sigset, sigset, sizeof(*sigset));
-    r = ioctl(vcpu->fd, KVM_SET_SIGNAL_MASK, sigmask);
+    r = ioctl(env->kvm_fd, KVM_SET_SIGNAL_MASK, sigmask);
     if (r == -1)
         r = -errno;
     free(sigmask);
@@ -1089,10 +1088,10 @@ int kvm_pit_in_kernel(kvm_context_t kvm)
     return kvm->pit_in_kernel;
 }
 
-int kvm_inject_nmi(kvm_vcpu_context_t vcpu)
+int kvm_inject_nmi(CPUState *env)
 {
 #ifdef KVM_CAP_USER_NMI
-    return ioctl(vcpu->fd, KVM_NMI);
+    return ioctl(env->kvm_fd, KVM_NMI);
 #else
     return -ENOSYS;
 #endif
@@ -1689,7 +1688,7 @@ int kvm_cpu_exec(CPUState *env)
 {
     int r;
 
-    r = kvm_run(env->kvm_cpu_state.vcpu_ctx, env);
+    r = kvm_run(env);
     if (r < 0) {
         printf("kvm_run returned %d\n", r);
         vm_stop(0);
@@ -1756,7 +1755,7 @@ static void kvm_on_sigbus(CPUState *env, siginfo_t *siginfo)
             hardware_memory_error();
         }
         mce.addr = paddr;
-        r = kvm_set_mce(env->kvm_cpu_state.vcpu_ctx, &mce);
+        r = kvm_set_mce(env, &mce);
         if (r < 0) {
             fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
             abort();
@@ -1896,7 +1895,7 @@ static void setup_kernel_sigmask(CPUState *env)
     sigdelset(&set, SIG_IPI);
     sigdelset(&set, SIGBUS);
 
-    kvm_set_signal_mask(env->kvm_cpu_state.vcpu_ctx, &set);
+    kvm_set_signal_mask(env, &set);
 }
 
 static void qemu_kvm_system_reset(void)
@@ -1965,7 +1964,7 @@ static void *ap_main_loop(void *_env)
     env->thread_id = kvm_get_thread_id();
     sigfillset(&signals);
     sigprocmask(SIG_BLOCK, &signals, NULL);
-    env->kvm_cpu_state.vcpu_ctx = kvm_create_vcpu(env, env->cpu_index);
+    kvm_create_vcpu(env, env->cpu_index);
 
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
     /* do ioperm for io ports of assigned devices */
@@ -2425,7 +2424,7 @@ static void kvm_invoke_set_guest_debug(void *data)
         cpu_single_env->kvm_cpu_state.regs_modified = 0;
     }
 
     dbg_data->err =
-        kvm_set_guest_debug(cpu_single_env->kvm_cpu_state.vcpu_ctx,
+        kvm_set_guest_debug(cpu_single_env,
                             &dbg_data->dbg);
 }
@@ -2658,7 +2657,7 @@ static void kvm_do_inject_x86_mce(void *_data)
     struct kvm_x86_mce_data *data = _data;
     int r;
 
-    r = kvm_set_mce(data->env->kvm_cpu_state.vcpu_ctx, data->mce);
+    r = kvm_set_mce(data->env, data->mce);
     if (r < 0) {
         perror("kvm_set_mce FAILED");
         if (data->abort_on_error)
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 863911d..abcb98d 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -95,9 +95,9 @@ int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
 
 int kvm_arch_run(CPUState *env);
 
-void kvm_show_code(kvm_vcpu_context_t vcpu);
+void kvm_show_code(CPUState *env);
 
-int handle_halt(kvm_vcpu_context_t vcpu);
+int handle_halt(CPUState *env);
 
 #ifndef QEMU_KVM_NO_CPU
 
@@ -109,13 +109,13 @@ int try_push_interrupts(kvm_context_t kvm);
 
 #if defined(__x86_64__) || defined(__i386__)
 struct kvm_msr_list *kvm_get_msr_list(kvm_context_t);
-int kvm_get_msrs(kvm_vcpu_context_t, struct kvm_msr_entry *msrs, int n);
-int kvm_set_msrs(kvm_vcpu_context_t, struct kvm_msr_entry *msrs, int n);
+int kvm_get_msrs(CPUState *env, struct kvm_msr_entry *msrs, int n);
+int kvm_set_msrs(CPUState *env, struct kvm_msr_entry *msrs, int n);
 int kvm_get_mce_cap_supported(kvm_context_t, uint64_t *mce_cap,
                               int *max_banks);
-int kvm_setup_mce(kvm_vcpu_context_t vcpu, uint64_t *mcg_cap);
+int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap);
 struct kvm_x86_mce;
-int kvm_set_mce(kvm_vcpu_context_t vcpu, struct kvm_x86_mce *mce);
+int kvm_set_mce(CPUState *env, struct kvm_x86_mce *mce);
 #endif
 
 #endif
@@ -172,18 +172,6 @@ int kvm_create_vm(kvm_context_t kvm);
 void kvm_create_irqchip(kvm_context_t kvm);
 
 /*!
- * \brief Create a new virtual cpu
- *
- * This creates a new virtual cpu (the first vcpu is created by kvm_create()).
- * Should be called from a thread dedicated to the vcpu.
- *
- * \param kvm kvm context
- * \param slot vcpu number (> 0)
- * \return 0 on success, -errno on failure
- */
-kvm_vcpu_context_t kvm_create_vcpu(CPUState *env, int id);
-
-/*!
  * \brief Start the VCPU
 *
 * This starts the VCPU and virtualization is started.\n
@@ -204,7 +192,7 @@ kvm_vcpu_context_t kvm_create_vcpu(CPUState *env, int id);
  * return except for when an error has occured, or when you have sent it
 * an EINTR signal.
 */
-int kvm_run(kvm_vcpu_context_t vcpu, void *env);
+int kvm_run(CPUState *env);
 
 /*!
  * \brief Get interrupt flag from on last exit to userspace
@@ -243,7 +231,7 @@ int kvm_is_ready_for_interrupt_injection(CPUState *env);
  *          registers values
 * \return 0 on success
 */
-int kvm_get_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs);
+int kvm_get_regs(CPUState *env, struct kvm_regs *regs);
 
 /*!
  * \brief Write VCPU registers
@@ -258,7 +246,7 @@ int kvm_get_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs);
  *          registers values
 * \return 0 on success
 */
-int kvm_set_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs);
+int kvm_set_regs(CPUState *env, struct kvm_regs *regs);
 
 /*!
  * \brief Read VCPU fpu registers
  *
@@ -274,7 +262,7 @@ int kvm_set_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs);
  *          fpu registers values
 * \return 0 on success
 */
-int kvm_get_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu);
+int kvm_get_fpu(CPUState *env, struct kvm_fpu *fpu);
 
 /*!
  * \brief Write VCPU fpu registers
@@ -288,7 +276,7 @@ int kvm_get_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu);
  * \param fpu Pointer to a kvm_fpu which holds the new vcpu fpu state
 * \return 0 on success
 */
-int kvm_set_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu);
+int kvm_set_fpu(CPUState *env, struct kvm_fpu *fpu);
 
 /*!
  * \brief Read VCPU system registers
@@ -306,7 +294,7 @@ int kvm_set_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu);
  *          registers values
 * \return 0 on success
 */
-int kvm_get_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *regs);
+int kvm_get_sregs(CPUState *env, struct kvm_sregs *regs);
 
 /*!
  * \brief Write VCPU system registers
@@ -321,29 +309,29 @@ int kvm_get_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *regs);
  *          registers values
 * \return 0 on success
 */
-int kvm_set_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *regs);
+int kvm_set_sregs(CPUState *env, struct kvm_sregs *regs);
 
 #ifdef KVM_CAP_MP_STATE
 /*!
  *
 *  \brief Read VCPU MP state
 *
 */
-int kvm_get_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state);
+int kvm_get_mpstate(CPUState *env, struct kvm_mp_state *mp_state);
 /*!
  *
 *  \brief Write VCPU MP state
 *
 */
-int kvm_set_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state);
+int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state);
 /*!
  *
 *  \brief Reset VCPU MP state
 *
 */
-static inline int kvm_reset_mpstate(kvm_vcpu_context_t vcpu)
+static inline int kvm_reset_mpstate(CPUState *env)
 {
     struct kvm_mp_state mp_state = {.mp_state = KVM_MP_STATE_UNINITIALIZED };
-    return kvm_set_mpstate(vcpu, &mp_state);
+    return kvm_set_mpstate(env, &mp_state);
 }
 #endif
@@ -357,10 +345,10 @@ static inline int kvm_reset_mpstate(kvm_vcpu_context_t vcpu)
  * \param irq Vector number
 * \return 0 on success
 */
-int kvm_inject_irq(kvm_vcpu_context_t vcpu, unsigned irq);
+int kvm_inject_irq(CPUState *env, unsigned irq);
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_set_guest_debug(kvm_vcpu_context_t, struct kvm_guest_debug *dbg);
+int kvm_set_guest_debug(CPUState *env, struct kvm_guest_debug *dbg);
 #endif
 
 #if defined(__i386__) || defined(__x86_64__)
@@ -375,7 +363,7 @@ int kvm_set_guest_debug(kvm_vcpu_context_t, struct kvm_guest_debug *dbg);
  * \param entries cpuid function entries table
 * \return 0 on success, or -errno on error
 */
-int kvm_setup_cpuid(kvm_vcpu_context_t vcpu, int nent,
+int kvm_setup_cpuid(CPUState *env, int nent,
                     struct kvm_cpuid_entry *entries);
 
 /*!
@@ -391,7 +379,7 @@ int kvm_setup_cpuid(kvm_vcpu_context_t vcpu, int nent,
  * \param entries cpuid function entries table
 * \return 0 on success, or -errno on error
 */
-int kvm_setup_cpuid2(kvm_vcpu_context_t vcpu, int nent,
+int kvm_setup_cpuid2(CPUState *env, int nent,
                      struct kvm_cpuid_entry2 *entries);
 
 /*!
@@ -425,7 +413,7 @@ int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages);
  * \param sigset signal mask for guest mode
 * \return 0 on success, or -errno on error
 */
-int kvm_set_signal_mask(kvm_vcpu_context_t vcpu, const sigset_t *sigset);
+int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset);
 
 /*!
  * \brief Dump VCPU registers
@@ -439,7 +427,7 @@ int kvm_set_signal_mask(kvm_vcpu_context_t vcpu, const sigset_t *sigset);
  * \param vcpu Which virtual CPU should get dumped
 * \return 0 on success
 */
-void kvm_show_regs(kvm_vcpu_context_t vcpu);
+void kvm_show_regs(CPUState *env);
 
 void *kvm_create_phys_mem(kvm_context_t, unsigned long phys_start,
@@ -551,7 +539,7 @@ int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
  * \param vcpu Which virtual CPU should be accessed
 * \param s Local apic state of the specific virtual CPU
 */
-int kvm_get_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s);
+int kvm_get_lapic(CPUState *env, struct kvm_lapic_state *s);
 
 /*!
  * \brief Set in kernel local APIC for vcpu
@@ -562,7 +550,7 @@ int kvm_get_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s);
  * \param vcpu Which virtual CPU should be accessed
 * \param s Local apic state of the specific virtual CPU
 */
-int kvm_set_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s);
+int kvm_set_lapic(CPUState *env, struct kvm_lapic_state *s);
 
 #endif
 
@@ -575,7 +563,7 @@ int kvm_set_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s);
  * \param vcpu Which virtual CPU should get dumped
 * \return 0 on success
 */
-int kvm_inject_nmi(kvm_vcpu_context_t vcpu);
+int kvm_inject_nmi(CPUState *env);
 
 #endif
 
@@ -682,7 +670,7 @@ int kvm_get_pit2(kvm_context_t kvm, struct kvm_pit_state2 *ps2);
  * \param kvm Pointer to the current kvm_context
 * \param vcpu vcpu to enable tpr access reporting on
 */
-int kvm_enable_tpr_access_reporting(kvm_vcpu_context_t vcpu);
+int kvm_enable_tpr_access_reporting(CPUState *env);
 
 /*!
  * \brief Disable kernel tpr access reporting
@@ -692,9 +680,9 @@ int kvm_enable_tpr_access_reporting(kvm_vcpu_context_t vcpu);
  * \param kvm Pointer to the current kvm_context
 * \param vcpu vcpu to disable tpr access reporting on
 */
-int kvm_disable_tpr_access_reporting(kvm_vcpu_context_t vcpu);
+int kvm_disable_tpr_access_reporting(CPUState *env);
 
-int kvm_enable_vapic(kvm_vcpu_context_t vcpu, uint64_t vapic);
+int kvm_enable_vapic(CPUState *env, uint64_t vapic);
 
 #endif
 
@@ -1073,8 +1061,8 @@ struct ioperm_data {
 };
 
 void qemu_kvm_cpu_stop(CPUState *env);
-int kvm_arch_halt(kvm_vcpu_context_t vcpu);
-int handle_tpr_access(void *opaque, kvm_vcpu_context_t vcpu, uint64_t rip,
+int kvm_arch_halt(CPUState *env);
+int handle_tpr_access(void *opaque, CPUState *env, uint64_t rip,
                       int is_write);
 
 int kvm_has_sync_mmu(void);
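
[Editor's illustration, not part of the patch: a minimal standalone sketch of the kernel interface these wrappers sit on. A vcpu is addressed purely through the file descriptor returned by KVM_CREATE_VCPU, which is why caching that descriptor in env->kvm_fd is enough to replace the old kvm_vcpu_context_t. Error handling for the /dev/kvm and VM descriptors is omitted for brevity; x86 assumed.]

/* Illustration only -- raw KVM vcpu access through a plain file descriptor. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int sys_fd  = open("/dev/kvm", O_RDWR);            /* system handle   */
    int vm_fd   = ioctl(sys_fd, KVM_CREATE_VM, 0);      /* VM handle       */
    int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);     /* vcpu 0's handle */
    struct kvm_regs regs;

    /* Same ioctl that kvm_get_regs() now issues against env->kvm_fd. */
    if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == -1)
        perror("KVM_GET_REGS");
    else
        printf("rip = 0x%llx\n", (unsigned long long)regs.rip);
    return 0;
}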