
[RFC] Do not use cpu_index in interface between libkvm and qemu

Message ID 20090602134740.GE25846@redhat.com (mailing list archive)
State New, archived

Commit Message

Gleb Natapov June 2, 2009, 1:47 p.m. UTC
On vcpu creation, a cookie is returned that is used in future communication.
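
For reference, a minimal sketch of how a caller is expected to use the new cookie, based on the signatures introduced below (kvm_create_vcpu() returning a kvm_vcpu_context_t, kvm_run() and friends taking it); the helper function, its error handling and the include set are illustrative only, not part of the patch:

#include <errno.h>
#include "qemu-kvm.h"	/* assumed to pull in CPUState and libkvm-all.h */

static int create_and_run_vcpu(kvm_context_t kvm, CPUState *env)
{
	/* kvm_create_vcpu() now returns an opaque per-vcpu cookie instead of
	 * storing the vcpu fd inside kvm_context indexed by cpu_index. */
	kvm_vcpu_context_t vcpu = kvm_create_vcpu(kvm, env->cpu_index);
	if (!vcpu)
		return -errno;

	/* The cookie is kept in the per-cpu state and passed to every
	 * subsequent per-vcpu call in place of (kvm_context, cpu_index). */
	env->kvm_cpu_state.vcpu_ctx = vcpu;
	return kvm_run(vcpu, env);
}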

Signed-off-by: Gleb Natapov <gleb@redhat.com>
--
			Gleb.

Comments

Avi Kivity June 4, 2009, 12:58 p.m. UTC | #1
Gleb Natapov wrote:
> On vcpu creation, a cookie is returned that is used in future communication.
>
>   

Applied, thanks.

Patch

diff --git a/cpu-defs.h b/cpu-defs.h
index 1e071e7..5f541e0 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -147,6 +147,7 @@  struct KVMCPUState {
     int stop;
     int stopped;
     int created;
+    void *vcpu_ctx;
     struct qemu_work_item *queued_work_first, *queued_work_last;
 };
 
diff --git a/hw/apic.c b/hw/apic.c
index 86aa6b6..c5d97b2 100644
--- a/hw/apic.c
+++ b/hw/apic.c
@@ -833,7 +833,7 @@  static void kvm_kernel_lapic_save_to_user(APICState *s)
     struct kvm_lapic_state *kapic = &apic;
     int i, v;
 
-    kvm_get_lapic(kvm_context, s->cpu_env->cpu_index, kapic);
+    kvm_get_lapic(s->cpu_env->kvm_cpu_state.vcpu_ctx, kapic);
 
     s->id = kapic_reg(kapic, 0x2) >> 24;
     s->tpr = kapic_reg(kapic, 0x8);
@@ -886,7 +886,7 @@  static void kvm_kernel_lapic_load_from_user(APICState *s)
     kapic_set_reg(klapic, 0x38, s->initial_count);
     kapic_set_reg(klapic, 0x3e, s->divide_conf);
 
-    kvm_set_lapic(kvm_context, s->cpu_env->cpu_index, klapic);
+    kvm_set_lapic(s->cpu_env->kvm_cpu_state.vcpu_ctx, klapic);
 }
 
 #endif
diff --git a/kvm-tpr-opt.c b/kvm-tpr-opt.c
index bdbc742..3f388ef 100644
--- a/kvm-tpr-opt.c
+++ b/kvm-tpr-opt.c
@@ -70,7 +70,7 @@  static uint8_t read_byte_virt(CPUState *env, target_ulong virt)
 {
     struct kvm_sregs sregs;
 
-    kvm_get_sregs(kvm_context, env->cpu_index, &sregs);
+    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
     return ldub_phys(map_addr(&sregs, virt, NULL));
 }
 
@@ -78,7 +78,7 @@  static void write_byte_virt(CPUState *env, target_ulong virt, uint8_t b)
 {
     struct kvm_sregs sregs;
 
-    kvm_get_sregs(kvm_context, env->cpu_index, &sregs);
+    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
     stb_phys(map_addr(&sregs, virt, NULL), b);
 }
 
@@ -86,7 +86,7 @@  static __u64 kvm_rsp_read(CPUState *env)
 {
     struct kvm_regs regs;
 
-    kvm_get_regs(kvm_context, env->cpu_index, &regs);
+    kvm_get_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
     return regs.rsp;
 }
 
@@ -192,7 +192,7 @@  static int bios_is_mapped(CPUState *env, uint64_t rip)
     if (bios_enabled)
 	return 1;
 
-    kvm_get_sregs(kvm_context, env->cpu_index, &sregs);
+    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
 
     probe = (rip & 0xf0000000) + 0xe0000;
     phys = map_addr(&sregs, probe, &perms);
@@ -241,7 +241,7 @@  static int enable_vapic(CPUState *env)
     if (pcr_cpu < 0)
 	    return 0;
 
-    kvm_enable_vapic(kvm_context, env->cpu_index, vapic_phys + (pcr_cpu << 7));
+    kvm_enable_vapic(env->kvm_cpu_state.vcpu_ctx, vapic_phys + (pcr_cpu << 7));
     cpu_physical_memory_rw(vapic_phys + (pcr_cpu << 7) + 4, &one, 1, 1);
     bios_enabled = 1;
 
@@ -314,7 +314,7 @@  void kvm_tpr_access_report(CPUState *env, uint64_t rip, int is_write)
 
 void kvm_tpr_vcpu_start(CPUState *env)
 {
-    kvm_enable_tpr_access_reporting(kvm_context, env->cpu_index);
+    kvm_enable_tpr_access_reporting(env->kvm_cpu_state.vcpu_ctx);
     if (bios_enabled)
 	enable_vapic(env);
 }
@@ -364,7 +364,7 @@  static void vtpr_ioport_write(void *opaque, uint32_t addr, uint32_t val)
     struct kvm_sregs sregs;
     uint32_t rip;
 
-    kvm_get_regs(kvm_context, env->cpu_index, &regs);
+    kvm_get_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
     rip = regs.rip - 2;
     write_byte_virt(env, rip, 0x66);
     write_byte_virt(env, rip + 1, 0x90);
@@ -372,7 +372,7 @@  static void vtpr_ioport_write(void *opaque, uint32_t addr, uint32_t val)
 	return;
     if (!bios_is_mapped(env, rip))
 	printf("bios not mapped?\n");
-    kvm_get_sregs(kvm_context, env->cpu_index, &sregs);
+    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
     for (addr = 0xfffff000u; addr >= 0x80000000u; addr -= 4096)
 	if (map_addr(&sregs, addr, NULL) == 0xfee00000u) {
 	    real_tpr = addr + 0x80;
diff --git a/libkvm-all.c b/libkvm-all.c
index 1668e32..a826341 100644
--- a/libkvm-all.c
+++ b/libkvm-all.c
@@ -356,10 +356,12 @@  kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
 
 void kvm_finalize(kvm_context_t kvm)
 {
+	/* FIXME
     	if (kvm->vcpu_fd[0] != -1)
 		close(kvm->vcpu_fd[0]);
     	if (kvm->vm_fd != -1)
 		close(kvm->vm_fd);
+	*/
 	close(kvm->fd);
 	free(kvm);
 }
@@ -374,32 +376,43 @@  void kvm_disable_pit_creation(kvm_context_t kvm)
 	kvm->no_pit_creation = 1;
 }
 
-int kvm_create_vcpu(kvm_context_t kvm, int slot)
+kvm_vcpu_context_t kvm_create_vcpu(kvm_context_t kvm, int id)
 {
 	long mmap_size;
 	int r;
+	kvm_vcpu_context_t vcpu_ctx = malloc(sizeof(struct kvm_vcpu_context));
 
-	r = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, slot);
+	if (!vcpu_ctx) {
+		errno = ENOMEM;
+		return NULL;
+	}
+
+	vcpu_ctx->kvm = kvm;
+	vcpu_ctx->id = id;
+
+	r = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, id);
 	if (r == -1) {
-		r = -errno;
 		fprintf(stderr, "kvm_create_vcpu: %m\n");
-		return r;
+		goto err;
 	}
-	kvm->vcpu_fd[slot] = r;
+	vcpu_ctx->fd = r;
 	mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 	if (mmap_size == -1) {
-		r = -errno;
 		fprintf(stderr, "get vcpu mmap size: %m\n");
-		return r;
+		goto err_fd;
 	}
-	kvm->run[slot] = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
-			      kvm->vcpu_fd[slot], 0);
-	if (kvm->run[slot] == MAP_FAILED) {
-		r = -errno;
+	vcpu_ctx->run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
+			      vcpu_ctx->fd, 0);
+	if (vcpu_ctx->run == MAP_FAILED) {
 		fprintf(stderr, "mmap vcpu area: %m\n");
-		return r;
+		goto err_fd;
 	}
-	return 0;
+	return vcpu_ctx;
+err_fd:
+	close(vcpu_ctx->fd);
+err:
+	free(vcpu_ctx);
+	return NULL;
 }
 
 int kvm_create_vm(kvm_context_t kvm)
@@ -414,8 +427,6 @@  int kvm_create_vm(kvm_context_t kvm)
 	kvm->nr_allocated_irq_routes = 0;
 #endif
 
-	kvm->vcpu_fd[0] = -1;
-
 	fd = ioctl(fd, KVM_CREATE_VM, 0);
 	if (fd == -1) {
 		fprintf(stderr, "kvm_create_vm: %m\n");
@@ -731,8 +742,10 @@  int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
 
 #endif
 
-static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
+static int handle_io(kvm_vcpu_context_t vcpu)
 {
+	struct kvm_run *run = vcpu->run;
+	kvm_context_t kvm = vcpu->kvm;
 	uint16_t addr = run->io.port;
 	int r;
 	int i;
@@ -786,10 +799,11 @@  static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
 	return 0;
 }
 
-int handle_debug(kvm_context_t kvm, int vcpu, void *env)
+int handle_debug(kvm_vcpu_context_t vcpu, void *env)
 {
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-    struct kvm_run *run = kvm->run[vcpu];
+    struct kvm_run *run = vcpu->run;
+    kvm_context_t kvm = vcpu->kvm;
 
     return kvm->callbacks->debug(kvm->opaque, env, &run->debug.arch);
 #else
@@ -797,61 +811,63 @@  int handle_debug(kvm_context_t kvm, int vcpu, void *env)
 #endif
 }
 
-int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
+int kvm_get_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs)
 {
-    return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs);
+    return ioctl(vcpu->fd, KVM_GET_REGS, regs);
 }
 
-int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
+int kvm_set_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs)
 {
-    return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_REGS, regs);
+    return ioctl(vcpu->fd, KVM_SET_REGS, regs);
 }
 
-int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
+int kvm_get_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu)
 {
-    return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_FPU, fpu);
+    return ioctl(vcpu->fd, KVM_GET_FPU, fpu);
 }
 
-int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
+int kvm_set_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu)
 {
-    return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_FPU, fpu);
+    return ioctl(vcpu->fd, KVM_SET_FPU, fpu);
 }
 
-int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
+int kvm_get_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *sregs)
 {
-    return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_SREGS, sregs);
+    return ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
 }
 
-int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
+int kvm_set_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *sregs)
 {
-    return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SREGS, sregs);
+    return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
 }
 
 #ifdef KVM_CAP_MP_STATE
-int kvm_get_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
+int kvm_get_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
 {
     int r;
 
-    r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
+    r = ioctl(vcpu->kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
     if (r > 0)
-        return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_MP_STATE, mp_state);
+        return ioctl(vcpu->fd, KVM_GET_MP_STATE, mp_state);
     return -ENOSYS;
 }
 
-int kvm_set_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
+int kvm_set_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
 {
     int r;
 
-    r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
+    r = ioctl(vcpu->kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
     if (r > 0)
-        return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_MP_STATE, mp_state);
+        return ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
     return -ENOSYS;
 }
 #endif
 
-static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
+static int handle_mmio(kvm_vcpu_context_t vcpu)
 {
-	unsigned long addr = kvm_run->mmio.phys_addr;
+	unsigned long addr = vcpu->run->mmio.phys_addr;
+	kvm_context_t kvm = vcpu->kvm;
+	struct kvm_run *kvm_run = vcpu->run;
 	void *data = kvm_run->mmio.data;
 
 	/* hack: Red Hat 7.1 generates these weird accesses. */
@@ -871,9 +887,9 @@  int handle_io_window(kvm_context_t kvm)
 	return kvm->callbacks->io_window(kvm->opaque);
 }
 
-int handle_halt(kvm_context_t kvm, int vcpu)
+int handle_halt(kvm_vcpu_context_t vcpu)
 {
-	return kvm->callbacks->halt(kvm->opaque, vcpu);
+	return vcpu->kvm->callbacks->halt(vcpu->kvm->opaque, vcpu);
 }
 
 int handle_shutdown(kvm_context_t kvm, void *env)
@@ -903,25 +919,22 @@  int pre_kvm_run(kvm_context_t kvm, void *env)
 	return kvm->callbacks->pre_kvm_run(kvm->opaque, env);
 }
 
-int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
+int kvm_get_interrupt_flag(kvm_vcpu_context_t vcpu)
 {
-	struct kvm_run *run = kvm->run[vcpu];
-
-	return run->if_flag;
+	return vcpu->run->if_flag;
 }
 
-int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
+int kvm_is_ready_for_interrupt_injection(kvm_vcpu_context_t vcpu)
 {
-	struct kvm_run *run = kvm->run[vcpu];
-
-	return run->ready_for_interrupt_injection;
+	return vcpu->run->ready_for_interrupt_injection;
 }
 
-int kvm_run(kvm_context_t kvm, int vcpu, void *env)
+int kvm_run(kvm_vcpu_context_t vcpu, void *env)
 {
 	int r;
-	int fd = kvm->vcpu_fd[vcpu];
-	struct kvm_run *run = kvm->run[vcpu];
+	int fd = vcpu->fd;
+	struct kvm_run *run = vcpu->run;
+	kvm_context_t kvm = vcpu->kvm;
 
 again:
 	push_nmi(kvm);
@@ -969,35 +982,35 @@  again:
 		switch (run->exit_reason) {
 		case KVM_EXIT_UNKNOWN:
 			fprintf(stderr, "unhandled vm exit: 0x%x vcpu_id %d\n",
-				(unsigned)run->hw.hardware_exit_reason, vcpu);
-			kvm_show_regs(kvm, vcpu);
+				(unsigned)run->hw.hardware_exit_reason,vcpu->id);
+			kvm_show_regs(vcpu);
 			abort();
 			break;
 		case KVM_EXIT_FAIL_ENTRY:
 			fprintf(stderr, "kvm_run: failed entry, reason %u\n", 
 				(unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
-			kvm_show_regs(kvm, vcpu);
+			kvm_show_regs(vcpu);
 			return -ENOEXEC;
 			break;
 		case KVM_EXIT_EXCEPTION:
 			fprintf(stderr, "exception %d (%x)\n", 
 			       run->ex.exception,
 			       run->ex.error_code);
-			kvm_show_regs(kvm, vcpu);
-			kvm_show_code(kvm, vcpu);
+			kvm_show_regs(vcpu);
+			kvm_show_code(vcpu);
 			abort();
 			break;
 		case KVM_EXIT_IO:
-			r = handle_io(kvm, run, vcpu);
+			r = handle_io(vcpu);
 			break;
 		case KVM_EXIT_DEBUG:
-			r = handle_debug(kvm, vcpu, env);
+			r = handle_debug(vcpu, env);
 			break;
 		case KVM_EXIT_MMIO:
-			r = handle_mmio(kvm, run);
+			r = handle_mmio(vcpu);
 			break;
 		case KVM_EXIT_HLT:
-			r = handle_halt(kvm, vcpu);
+			r = handle_halt(vcpu);
 			break;
 		case KVM_EXIT_IRQ_WINDOW_OPEN:
 			break;
@@ -1014,10 +1027,10 @@  again:
 			break;
 #endif
 		default:
-			if (kvm_arch_run(run, kvm, vcpu)) {
+			if (kvm_arch_run(vcpu)) {
 				fprintf(stderr, "unhandled vm exit: 0x%x\n",
 							run->exit_reason);
-				kvm_show_regs(kvm, vcpu);
+				kvm_show_regs(vcpu);
 				abort();
 			}
 			break;
@@ -1029,28 +1042,28 @@  more:
 	return r;
 }
 
-int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
+int kvm_inject_irq(kvm_vcpu_context_t vcpu, unsigned irq)
 {
 	struct kvm_interrupt intr;
 
 	intr.irq = irq;
-	return ioctl(kvm->vcpu_fd[vcpu], KVM_INTERRUPT, &intr);
+	return ioctl(vcpu->fd, KVM_INTERRUPT, &intr);
 }
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_set_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_guest_debug *dbg)
+int kvm_set_guest_debug(kvm_vcpu_context_t vcpu, struct kvm_guest_debug *dbg)
 {
-	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_GUEST_DEBUG, dbg);
+	return ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, dbg);
 }
 #endif
 
-int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
+int kvm_set_signal_mask(kvm_vcpu_context_t vcpu, const sigset_t *sigset)
 {
 	struct kvm_signal_mask *sigmask;
 	int r;
 
 	if (!sigset) {
-		r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, NULL);
+		r = ioctl(vcpu->fd, KVM_SET_SIGNAL_MASK, NULL);
 		if (r == -1)
 			r = -errno;
 		return r;
@@ -1061,7 +1074,7 @@  int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
 
 	sigmask->len = 8;
 	memcpy(sigmask->sigset, sigset, sizeof(*sigset));
-	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, sigmask);
+	r = ioctl(vcpu->fd, KVM_SET_SIGNAL_MASK, sigmask);
 	if (r == -1)
 		r = -errno;
 	free(sigmask);
@@ -1087,10 +1100,10 @@  int kvm_has_sync_mmu(kvm_context_t kvm)
         return r;
 }
 
-int kvm_inject_nmi(kvm_context_t kvm, int vcpu)
+int kvm_inject_nmi(kvm_vcpu_context_t vcpu)
 {
 #ifdef KVM_CAP_USER_NMI
-	return ioctl(kvm->vcpu_fd[vcpu], KVM_NMI);
+	return ioctl(vcpu->fd, KVM_NMI);
 #else
 	return -ENOSYS;
 #endif
diff --git a/libkvm-all.h b/libkvm-all.h
index 4821a1e..3dca4f7 100644
--- a/libkvm-all.h
+++ b/libkvm-all.h
@@ -20,13 +20,15 @@ 
 #include <signal.h>
 
 struct kvm_context;
+struct kvm_vcpu_context;
 
 typedef struct kvm_context *kvm_context_t;
+typedef struct kvm_vcpu_context *kvm_vcpu_context_t;
 
 #if defined(__x86_64__) || defined(__i386__)
 struct kvm_msr_list *kvm_get_msr_list(kvm_context_t);
-int kvm_get_msrs(kvm_context_t, int vcpu, struct kvm_msr_entry *msrs, int n);
-int kvm_set_msrs(kvm_context_t, int vcpu, struct kvm_msr_entry *msrs, int n);
+int kvm_get_msrs(kvm_vcpu_context_t, struct kvm_msr_entry *msrs, int n);
+int kvm_set_msrs(kvm_vcpu_context_t, struct kvm_msr_entry *msrs, int n);
 #endif
 
 /*!
@@ -65,7 +67,7 @@  struct kvm_callbacks {
 	 * Typically, you should yeild here to prevent 100% CPU utilization
 	 * on the host CPU.
 	 */
-    int (*halt)(void *opaque, int vcpu);
+    int (*halt)(void *opaque, kvm_vcpu_context_t vcpu);
     int (*shutdown)(void *opaque, void *env);
     int (*io_window)(void *opaque);
     int (*try_push_interrupts)(void *opaque);
@@ -74,15 +76,15 @@  struct kvm_callbacks {
 #endif
     void (*post_kvm_run)(void *opaque, void *env);
     int (*pre_kvm_run)(void *opaque, void *env);
-    int (*tpr_access)(void *opaque, int vcpu, uint64_t rip, int is_write);
+    int (*tpr_access)(void *opaque, kvm_vcpu_context_t vcpu, uint64_t rip, int is_write);
 #if defined(__powerpc__)
-    int (*powerpc_dcr_read)(int vcpu, uint32_t dcrn, uint32_t *data);
-    int (*powerpc_dcr_write)(int vcpu, uint32_t dcrn, uint32_t data);
+    int (*powerpc_dcr_read)(kvm_vcpu_context_t vcpu, uint32_t dcrn, uint32_t *data);
+    int (*powerpc_dcr_write)(kvm_vcpu_context_t vcpu, uint32_t dcrn, uint32_t data);
 #endif
 #if defined(__s390__)
-    int (*s390_handle_intercept)(kvm_context_t context, int vcpu,
+    int (*s390_handle_intercept)(kvm_context_t context, kvm_vcpu_context_t vcpu,
 	struct kvm_run *run);
-    int (*s390_handle_reset)(kvm_context_t context, int vcpu,
+    int (*s390_handle_reset)(kvm_context_t context, kvm_vcpu_context_t vcpu,
 	 struct kvm_run *run);
 #endif
 };
@@ -163,7 +165,7 @@  void kvm_create_irqchip(kvm_context_t kvm);
  * \param slot vcpu number (> 0)
  * \return 0 on success, -errno on failure
  */
-int kvm_create_vcpu(kvm_context_t kvm, int slot);
+kvm_vcpu_context_t kvm_create_vcpu(kvm_context_t kvm, int id);
 
 /*!
  * \brief Start the VCPU
@@ -186,7 +188,7 @@  int kvm_create_vcpu(kvm_context_t kvm, int slot);
  * return except for when an error has occured, or when you have sent it
  * an EINTR signal.
  */
-int kvm_run(kvm_context_t kvm, int vcpu, void *env);
+int kvm_run(kvm_vcpu_context_t vcpu, void *env);
 
 /*!
  * \brief Get interrupt flag from on last exit to userspace
@@ -197,7 +199,7 @@  int kvm_run(kvm_context_t kvm, int vcpu, void *env);
  * \param vcpu Which virtual CPU should get dumped
  * \return interrupt flag value (0 or 1)
  */
-int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu);
+int kvm_get_interrupt_flag(kvm_vcpu_context_t vcpu);
 
 /*!
  * \brief Get the value of the APIC_BASE msr as of last exit to userspace
@@ -208,7 +210,7 @@  int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu);
  * \param vcpu Which virtual CPU should get dumped
  * \return APIC_BASE msr contents
  */
-uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu);
+uint64_t kvm_get_apic_base(kvm_vcpu_context_t vcpu);
 
 /*!
  * \brief Check if a vcpu is ready for interrupt injection
@@ -219,7 +221,7 @@  uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu);
  * \param vcpu Which virtual CPU should get dumped
  * \return boolean indicating interrupt injection readiness
  */
-int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu);
+int kvm_is_ready_for_interrupt_injection(kvm_vcpu_context_t vcpu);
 
 /*!
  * \brief Read VCPU registers
@@ -236,7 +238,7 @@  int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu);
  * registers values
  * \return 0 on success
  */
-int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
+int kvm_get_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs);
 
 /*!
  * \brief Write VCPU registers
@@ -251,7 +253,7 @@  int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
  * registers values
  * \return 0 on success
  */
-int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
+int kvm_set_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs);
 /*!
  * \brief Read VCPU fpu registers
  *
@@ -267,7 +269,7 @@  int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
  * fpu registers values
  * \return 0 on success
  */
-int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
+int kvm_get_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu);
 
 /*!
  * \brief Write VCPU fpu registers
@@ -281,7 +283,7 @@  int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
  * \param fpu Pointer to a kvm_fpu which holds the new vcpu fpu state
  * \return 0 on success
  */
-int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
+int kvm_set_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu);
 
 /*!
  * \brief Read VCPU system registers
@@ -299,7 +301,7 @@  int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
  * registers values
  * \return 0 on success
  */
-int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
+int kvm_get_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *regs);
 
 /*!
  * \brief Write VCPU system registers
@@ -314,30 +316,28 @@  int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
  * registers values
  * \return 0 on success
  */
-int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
+int kvm_set_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *regs);
 
 #ifdef KVM_CAP_MP_STATE
 /*!
  *  * \brief Read VCPU MP state
  *
  */
-int kvm_get_mpstate(kvm_context_t kvm, int vcpu,
-                    struct kvm_mp_state *mp_state);
+int kvm_get_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state);
 
 /*!
  *  * \brief Write VCPU MP state
  *
  */
-int kvm_set_mpstate(kvm_context_t kvm, int vcpu,
-                    struct kvm_mp_state *mp_state);
+int kvm_set_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state);
 /*!
  *  * \brief Reset VCPU MP state
  *
  */
-static inline int kvm_reset_mpstate(kvm_context_t kvm, int vcpu)
+static inline int kvm_reset_mpstate(kvm_vcpu_context_t vcpu)
 {
     struct kvm_mp_state mp_state = {.mp_state = KVM_MP_STATE_UNINITIALIZED};
-    return kvm_set_mpstate(kvm, vcpu, &mp_state);
+    return kvm_set_mpstate(vcpu, &mp_state);
 }
 #endif
 
@@ -351,10 +351,10 @@  static inline int kvm_reset_mpstate(kvm_context_t kvm, int vcpu)
  * \param irq Vector number
  * \return 0 on success
  */
-int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq);
+int kvm_inject_irq(kvm_vcpu_context_t vcpu, unsigned irq);
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_set_guest_debug(kvm_context_t, int vcpu, struct kvm_guest_debug *dbg);
+int kvm_set_guest_debug(kvm_vcpu_context_t, struct kvm_guest_debug *dbg);
 #endif
 
 #if defined(__i386__) || defined(__x86_64__)
@@ -369,7 +369,7 @@  int kvm_set_guest_debug(kvm_context_t, int vcpu, struct kvm_guest_debug *dbg);
  * \param entries cpuid function entries table
  * \return 0 on success, or -errno on error
  */
-int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
+int kvm_setup_cpuid(kvm_vcpu_context_t vcpu, int nent,
 		    struct kvm_cpuid_entry *entries);
 
 /*!
@@ -385,7 +385,7 @@  int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
  * \param entries cpuid function entries table
  * \return 0 on success, or -errno on error
  */
-int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
+int kvm_setup_cpuid2(kvm_vcpu_context_t vcpu, int nent,
 		     struct kvm_cpuid_entry2 *entries);
 
 /*!
@@ -414,7 +414,7 @@  int kvm_get_shadow_pages(kvm_context_t kvm , unsigned int *nrshadow_pages);
  * \param vcpu Which virtual CPU should get dumped
  * \param cr8 next cr8 value
  */
-void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8);
+void kvm_set_cr8(kvm_vcpu_context_t vcpu, uint64_t cr8);
 
 /*!
  * \brief Get cr8 for sync tpr in qemu apic emulation
@@ -425,7 +425,7 @@  void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8);
  * \param kvm Pointer to the current kvm_context
  * \param vcpu Which virtual CPU should get dumped
  */
-__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu);
+__u64 kvm_get_cr8(kvm_vcpu_context_t vcpu);
 #endif
 
 /*!
@@ -441,23 +441,7 @@  __u64 kvm_get_cr8(kvm_context_t kvm, int vcpu);
  * \param sigset signal mask for guest mode
  * \return 0 on success, or -errno on error
  */
-int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset);
-
-/*!
- * \brief Dump all VCPU information
- *
- * This dumps \b all the information that KVM has about a virtual CPU, namely:
- * - GP Registers
- * - System registers (selectors, descriptors, etc)
- * - VMCS Data
- * - MSRS
- * - Pending interrupts
- *
- * \param kvm Pointer to the current kvm_context
- * \param vcpu Which virtual CPU should get dumped
- * \return 0 on success
- */
-int kvm_dump_vcpu(kvm_context_t kvm, int vcpu);
+int kvm_set_signal_mask(kvm_vcpu_context_t vcpu, const sigset_t *sigset);
 
 /*!
  * \brief Dump VCPU registers
@@ -471,7 +455,7 @@  int kvm_dump_vcpu(kvm_context_t kvm, int vcpu);
  * \param vcpu Which virtual CPU should get dumped
  * \return 0 on success
  */
-void kvm_show_regs(kvm_context_t kvm, int vcpu);
+void kvm_show_regs(kvm_vcpu_context_t vcpu);
 
 
 void *kvm_create_phys_mem(kvm_context_t, unsigned long phys_start, 
@@ -593,7 +577,7 @@  int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
  * \param vcpu Which virtual CPU should be accessed
  * \param s Local apic state of the specific virtual CPU
  */
-int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
+int kvm_get_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s);
 
 /*!
  * \brief Set in kernel local APIC for vcpu
@@ -604,7 +588,7 @@  int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
  * \param vcpu Which virtual CPU should be accessed
  * \param s Local apic state of the specific virtual CPU
  */
-int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
+int kvm_set_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s);
 
 #endif
 
@@ -617,7 +601,7 @@  int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
  * \param vcpu Which virtual CPU should get dumped
  * \return 0 on success
  */
-int kvm_inject_nmi(kvm_context_t kvm, int vcpu);
+int kvm_inject_nmi(kvm_vcpu_context_t vcpu);
 
 #endif
 
@@ -677,7 +661,7 @@  int kvm_reinject_control(kvm_context_t kvm, int pit_reinject);
  * \param kvm Pointer to the current kvm_context
  * \param vcpu vcpu to enable tpr access reporting on
  */
-int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
+int kvm_enable_tpr_access_reporting(kvm_vcpu_context_t vcpu);
 
 /*!
  * \brief Disable kernel tpr access reporting
@@ -687,9 +671,9 @@  int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
  * \param kvm Pointer to the current kvm_context
  * \param vcpu vcpu to disable tpr access reporting on
  */
-int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
+int kvm_disable_tpr_access_reporting(kvm_vcpu_context_t vcpu);
 
-int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic);
+int kvm_enable_vapic(kvm_vcpu_context_t vcpu, uint64_t vapic);
 
 #endif
 
diff --git a/libkvm-common.h b/libkvm-common.h
index c95c591..dc5b667 100644
--- a/libkvm-common.h
+++ b/libkvm-common.h
@@ -44,8 +44,6 @@  struct kvm_context {
 	/// Filedescriptor to /dev/kvm
 	int fd;
 	int vm_fd;
-	int vcpu_fd[MAX_VCPUS];
-	struct kvm_run *run[MAX_VCPUS];
 	/// Callbacks that KVM uses to emulate various unvirtualizable functionality
 	struct kvm_callbacks *callbacks;
 	void *opaque;
@@ -71,6 +69,14 @@  struct kvm_context {
 	int max_gsi;
 };
 
+struct kvm_vcpu_context
+{
+	int fd;
+	struct kvm_run *run;
+	struct kvm_context *kvm;
+	uint32_t id;
+};
+
 int kvm_alloc_kernel_memory(kvm_context_t kvm, unsigned long memory,
 								void **vm_mem);
 int kvm_alloc_userspace_memory(kvm_context_t kvm, unsigned long memory,
@@ -78,17 +84,17 @@  int kvm_alloc_userspace_memory(kvm_context_t kvm, unsigned long memory,
 
 int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
                         void **vm_mem);
-int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu);
+int kvm_arch_run(kvm_vcpu_context_t vcpu);
 
 
-void kvm_show_code(kvm_context_t kvm, int vcpu);
+void kvm_show_code(kvm_vcpu_context_t vcpu);
 
-int handle_halt(kvm_context_t kvm, int vcpu);
+int handle_halt(kvm_vcpu_context_t vcpu);
 int handle_shutdown(kvm_context_t kvm, void *env);
 void post_kvm_run(kvm_context_t kvm, void *env);
 int pre_kvm_run(kvm_context_t kvm, void *env);
 int handle_io_window(kvm_context_t kvm);
-int handle_debug(kvm_context_t kvm, int vcpu, void *env);
+int handle_debug(kvm_vcpu_context_t vcpu, void *env);
 int try_push_interrupts(kvm_context_t kvm);
 
 #endif
diff --git a/qemu-kvm-ia64.c b/qemu-kvm-ia64.c
index 0e65cb4..d33c1c3 100644
--- a/qemu-kvm-ia64.c
+++ b/qemu-kvm-ia64.c
@@ -31,7 +31,7 @@  int kvm_arch_qemu_init_env(CPUState *cenv)
     return 0;
 }
 
-int kvm_arch_halt(void *opaque, int vcpu)
+int kvm_arch_halt(void *opaque, kvm_vcpu_context_t vcpu)
 {
     CPUState *env = cpu_single_env;
     env->hflags |= HF_HALTED_MASK;
@@ -104,7 +104,7 @@  void kvm_save_mpstate(CPUState *env)
     int r;
     struct kvm_mp_state mp_state;
 
-    r = kvm_get_mpstate(kvm_context, env->cpu_index, &mp_state);
+    r = kvm_get_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
     if (r < 0)
         env->mp_state = -1;
     else
@@ -122,7 +122,7 @@  void kvm_load_mpstate(CPUState *env)
      *  so don't touch it.
      */
     if (env->mp_state != -1)
-        kvm_set_mpstate(kvm_context, env->cpu_index, &mp_state);
+        kvm_set_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
 #endif
 }
 
@@ -130,7 +130,7 @@  void kvm_arch_cpu_reset(CPUState *env)
 {
     if (kvm_irqchip_in_kernel(kvm_context)) {
 #ifdef KVM_CAP_MP_STATE
-	kvm_reset_mpstate(kvm_context, env->cpu_index);
+	kvm_reset_mpstate(env->kvm_cpu_state.vcpu_ctx);
 #endif
     } else {
 	env->interrupt_request &= ~CPU_INTERRUPT_HARD;
diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
index 98aa530..da4da93 100644
--- a/qemu-kvm-x86.c
+++ b/qemu-kvm-x86.c
@@ -195,7 +195,7 @@  void kvm_arch_load_regs(CPUState *env)
     regs.rflags = env->eflags;
     regs.rip = env->eip;
 
-    kvm_set_regs(kvm_context, env->cpu_index, &regs);
+    kvm_set_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
 
     memset(&fpu, 0, sizeof fpu);
     fpu.fsw = env->fpus & ~(7 << 11);
@@ -206,7 +206,7 @@  void kvm_arch_load_regs(CPUState *env)
     memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
     memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
     fpu.mxcsr = env->mxcsr;
-    kvm_set_fpu(kvm_context, env->cpu_index, &fpu);
+    kvm_set_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);
 
     memcpy(sregs.interrupt_bitmap, env->interrupt_bitmap, sizeof(sregs.interrupt_bitmap));
 
@@ -251,7 +251,7 @@  void kvm_arch_load_regs(CPUState *env)
 
     sregs.efer = env->efer;
 
-    kvm_set_sregs(kvm_context, env->cpu_index, &sregs);
+    kvm_set_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
 
     /* msrs */
     n = 0;
@@ -270,7 +270,7 @@  void kvm_arch_load_regs(CPUState *env)
     }
 #endif
 
-    rc = kvm_set_msrs(kvm_context, env->cpu_index, msrs, n);
+    rc = kvm_set_msrs(env->kvm_cpu_state.vcpu_ctx, msrs, n);
     if (rc == -1)
         perror("kvm_set_msrs FAILED");
 }
@@ -282,7 +282,7 @@  void kvm_load_tsc(CPUState *env)
 
     set_msr_entry(&msr, MSR_IA32_TSC, env->tsc);
 
-    rc = kvm_set_msrs(kvm_context, env->cpu_index, &msr, 1);
+    rc = kvm_set_msrs(env->kvm_cpu_state.vcpu_ctx, &msr, 1);
     if (rc == -1)
         perror("kvm_set_tsc FAILED.\n");
 }
@@ -293,7 +293,7 @@  void kvm_save_mpstate(CPUState *env)
     int r;
     struct kvm_mp_state mp_state;
 
-    r = kvm_get_mpstate(kvm_context, env->cpu_index, &mp_state);
+    r = kvm_get_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
     if (r < 0)
         env->mp_state = -1;
     else
@@ -311,7 +311,7 @@  void kvm_load_mpstate(CPUState *env)
      *  so don't touch it.
      */
     if (env->mp_state != -1)
-        kvm_set_mpstate(kvm_context, env->cpu_index, &mp_state);
+        kvm_set_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
 #endif
 }
 
@@ -324,7 +324,7 @@  void kvm_arch_save_regs(CPUState *env)
     uint32_t hflags;
     uint32_t i, n, rc;
 
-    kvm_get_regs(kvm_context, env->cpu_index, &regs);
+    kvm_get_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
 
     env->regs[R_EAX] = regs.rax;
     env->regs[R_EBX] = regs.rbx;
@@ -348,7 +348,7 @@  void kvm_arch_save_regs(CPUState *env)
     env->eflags = regs.rflags;
     env->eip = regs.rip;
 
-    kvm_get_fpu(kvm_context, env->cpu_index, &fpu);
+    kvm_get_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);
     env->fpstt = (fpu.fsw >> 11) & 7;
     env->fpus = fpu.fsw;
     env->fpuc = fpu.fcw;
@@ -358,7 +358,7 @@  void kvm_arch_save_regs(CPUState *env)
     memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
     env->mxcsr = fpu.mxcsr;
 
-    kvm_get_sregs(kvm_context, env->cpu_index, &sregs);
+    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
 
     memcpy(env->interrupt_bitmap, sregs.interrupt_bitmap, sizeof(env->interrupt_bitmap));
 
@@ -444,7 +444,7 @@  void kvm_arch_save_regs(CPUState *env)
         msrs[n++].index = MSR_LSTAR;
     }
 #endif
-    rc = kvm_get_msrs(kvm_context, env->cpu_index, msrs, n);
+    rc = kvm_get_msrs(env->kvm_cpu_state.vcpu_ctx, msrs, n);
     if (rc == -1) {
         perror("kvm_get_msrs FAILED");
     }
@@ -580,7 +580,7 @@  int kvm_arch_qemu_init_env(CPUState *cenv)
     for (i = 0x80000000; i <= limit; ++i)
 	do_cpuid_ent(&cpuid_ent[cpuid_nent++], i, 0, &copy);
 
-    kvm_setup_cpuid2(kvm_context, cenv->cpu_index, cpuid_nent, cpuid_ent);
+    kvm_setup_cpuid2(cenv->kvm_cpu_state.vcpu_ctx, cpuid_nent, cpuid_ent);
 
     kvm_trim_features(&cenv->cpuid_features,
                       kvm_arch_get_supported_cpuid(cenv, 1, R_EDX));
@@ -594,7 +594,7 @@  int kvm_arch_qemu_init_env(CPUState *cenv)
     return 0;
 }
 
-int kvm_arch_halt(void *opaque, int vcpu)
+int kvm_arch_halt(void *opaque, kvm_vcpu_context_t vcpu)
 {
     CPUState *env = cpu_single_env;
 
@@ -610,20 +610,18 @@  int kvm_arch_halt(void *opaque, int vcpu)
 void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
 {
     if (!kvm_irqchip_in_kernel(kvm_context))
-	kvm_set_cr8(kvm_context, env->cpu_index, cpu_get_apic_tpr(env));
+	kvm_set_cr8(env->kvm_cpu_state.vcpu_ctx, cpu_get_apic_tpr(env));
 }
 
 void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
 {
-    int vcpu = env->cpu_index;
-
     cpu_single_env = env;
 
-    env->eflags = kvm_get_interrupt_flag(kvm_context, vcpu)
+    env->eflags = kvm_get_interrupt_flag(env->kvm_cpu_state.vcpu_ctx)
 	? env->eflags | IF_MASK : env->eflags & ~IF_MASK;
 
-    cpu_set_apic_tpr(env, kvm_get_cr8(kvm_context, vcpu));
-    cpu_set_apic_base(env, kvm_get_apic_base(kvm_context, vcpu));
+    cpu_set_apic_tpr(env, kvm_get_cr8(env->kvm_cpu_state.vcpu_ctx));
+    cpu_set_apic_base(env, kvm_get_apic_base(env->kvm_cpu_state.vcpu_ctx));
 }
 
 int kvm_arch_has_work(CPUState *env)
@@ -641,13 +639,13 @@  int kvm_arch_try_push_interrupts(void *opaque)
     CPUState *env = cpu_single_env;
     int r, irq;
 
-    if (kvm_is_ready_for_interrupt_injection(kvm_context, env->cpu_index) &&
+    if (kvm_is_ready_for_interrupt_injection(env->kvm_cpu_state.vcpu_ctx) &&
         (env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) {
             env->interrupt_request &= ~CPU_INTERRUPT_HARD;
 	    irq = cpu_get_pic_interrupt(env);
 	    if (irq >= 0) {
-		r = kvm_inject_irq(kvm_context, env->cpu_index, irq);
+		r = kvm_inject_irq(env->kvm_cpu_state.vcpu_ctx, irq);
 		if (r < 0)
 		    printf("cpu %d fail inject %x\n", env->cpu_index, irq);
 	    }
@@ -666,7 +664,7 @@  void kvm_arch_push_nmi(void *opaque)
         return;
 
     env->interrupt_request &= ~CPU_INTERRUPT_NMI;
-    r = kvm_inject_nmi(kvm_context, env->cpu_index);
+    r = kvm_inject_nmi(env->kvm_cpu_state.vcpu_ctx);
     if (r < 0)
         printf("cpu %d fail inject NMI\n", env->cpu_index);
 }
@@ -682,7 +680,7 @@  void kvm_arch_update_regs_for_sipi(CPUState *env)
     kvm_arch_load_regs(env);
 }
 
-int handle_tpr_access(void *opaque, int vcpu,
+int handle_tpr_access(void *opaque, kvm_vcpu_context_t vcpu,
 			     uint64_t rip, int is_write)
 {
     kvm_tpr_access_report(cpu_single_env, rip, is_write);
@@ -695,7 +693,7 @@  void kvm_arch_cpu_reset(CPUState *env)
     if (env->cpu_index != 0) {
 	if (kvm_irqchip_in_kernel(kvm_context)) {
 #ifdef KVM_CAP_MP_STATE
-	    kvm_reset_mpstate(kvm_context, env->cpu_index);
+	    kvm_reset_mpstate(env->kvm_cpu_state.vcpu_ctx);
 #endif
 	} else {
 	    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 68d3b92..b31b40d 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -204,7 +204,7 @@  int kvm_cpu_exec(CPUState *env)
 {
     int r;
 
-    r = kvm_run(kvm_context, env->cpu_index, env);
+    r = kvm_run(env->kvm_cpu_state.vcpu_ctx, env);
     if (r < 0) {
         printf("kvm_run returned %d\n", r);
         exit(1);
@@ -366,7 +366,7 @@  static void setup_kernel_sigmask(CPUState *env)
     sigprocmask(SIG_BLOCK, NULL, &set);
     sigdelset(&set, SIG_IPI);
     
-    kvm_set_signal_mask(kvm_context, env->cpu_index, &set);
+    kvm_set_signal_mask(env->kvm_cpu_state.vcpu_ctx, &set);
 }
 
 static void qemu_kvm_system_reset(void)
@@ -432,7 +432,7 @@  static void *ap_main_loop(void *_env)
     env->thread_id = kvm_get_thread_id();
     sigfillset(&signals);
     sigprocmask(SIG_BLOCK, &signals, NULL);
-    kvm_create_vcpu(kvm_context, env->cpu_index);
+    env->kvm_cpu_state.vcpu_ctx = kvm_create_vcpu(kvm_context, env->cpu_index);
 
 #ifdef USE_KVM_DEVICE_ASSIGNMENT
     /* do ioperm for io ports of assigned devices */
@@ -723,7 +723,7 @@  static int kvm_io_window(void *opaque)
 }
 
  
-static int kvm_halt(void *opaque, int vcpu)
+static int kvm_halt(void *opaque, kvm_vcpu_context_t vcpu)
 {
     return kvm_arch_halt(opaque, vcpu);
 }
@@ -1027,8 +1027,8 @@  static void kvm_invoke_set_guest_debug(void *data)
 {
     struct kvm_set_guest_debug_data *dbg_data = data;
 
-    dbg_data->err = kvm_set_guest_debug(kvm_context, cpu_single_env->cpu_index,
-                                        &dbg_data->dbg);
+    dbg_data->err = kvm_set_guest_debug(cpu_single_env->kvm_cpu_state.vcpu_ctx,
+		    &dbg_data->dbg);
 }
 
 int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 725589b..792ee2e 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -70,7 +70,6 @@  int kvm_arch_qemu_create_context(void);
 void kvm_arch_save_regs(CPUState *env);
 void kvm_arch_load_regs(CPUState *env);
 int kvm_arch_qemu_init_env(CPUState *cenv);
-int kvm_arch_halt(void *opaque, int vcpu);
 void kvm_arch_pre_kvm_run(void *opaque, CPUState *env);
 void kvm_arch_post_kvm_run(void *opaque, CPUState *env);
 int kvm_arch_has_work(CPUState *env);
@@ -113,8 +112,6 @@  void qemu_kvm_notify_work(void);
 
 void kvm_tpr_opt_setup(void);
 void kvm_tpr_access_report(CPUState *env, uint64_t rip, int is_write);
-int handle_tpr_access(void *opaque, int vcpu,
-			     uint64_t rip, int is_write);
 void kvm_tpr_vcpu_start(CPUState *env);
 
 int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf);
@@ -138,8 +135,8 @@  void kvm_arch_do_ioperm(void *_data);
 #endif
 
 #ifdef TARGET_PPC
-int handle_powerpc_dcr_read(int vcpu, uint32_t dcrn, uint32_t *data);
-int handle_powerpc_dcr_write(int vcpu,uint32_t dcrn, uint32_t data);
+int handle_powerpc_dcr_read(kvm_vcpu_context_t vcpu, uint32_t dcrn, uint32_t *data);
+int handle_powerpc_dcr_write(kvm_vcpu_context_t vcpu,uint32_t dcrn, uint32_t data);
 #endif
 
 #define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
@@ -165,6 +162,9 @@  struct ioperm_data {
 
 int qemu_kvm_has_sync_mmu(void);
 void qemu_kvm_cpu_stop(CPUState *env);
+int kvm_arch_halt(void *opaque, kvm_vcpu_context_t vcpu);
+int handle_tpr_access(void *opaque, kvm_vcpu_context_t vcpu,
+			     uint64_t rip, int is_write);
 
 #define kvm_enabled() (kvm_allowed)
 #define qemu_kvm_irqchip_in_kernel() kvm_irqchip_in_kernel(kvm_context)
diff --git a/target-i386/libkvm.c b/target-i386/libkvm.c
index 32d03f1..f88102e 100644
--- a/target-i386/libkvm.c
+++ b/target-i386/libkvm.c
@@ -96,22 +96,23 @@  int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
 
 #ifdef KVM_EXIT_TPR_ACCESS
 
-static int handle_tpr_access(kvm_context_t kvm, struct kvm_run *run, int vcpu)
+static int handle_tpr_access(kvm_vcpu_context_t vcpu)
 {
-	return kvm->callbacks->tpr_access(kvm->opaque, vcpu,
+	struct kvm_run *run = vcpu->run;
+	return vcpu->kvm->callbacks->tpr_access(vcpu->kvm->opaque, vcpu,
 					  run->tpr_access.rip,
 					  run->tpr_access.is_write);
 }
 
 
-int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic)
+int kvm_enable_vapic(kvm_vcpu_context_t vcpu, uint64_t vapic)
 {
 	int r;
 	struct kvm_vapic_addr va = {
 		.vapic_addr = vapic,
 	};
 
-	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_VAPIC_ADDR, &va);
+	r = ioctl(vcpu->fd, KVM_SET_VAPIC_ADDR, &va);
 	if (r == -1) {
 		r = -errno;
 		perror("kvm_enable_vapic");
@@ -122,9 +123,11 @@  int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic)
 
 #endif
 
-int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
+int kvm_arch_run(kvm_vcpu_context_t vcpu)
 {
 	int r = 0;
+	struct kvm_run *run = vcpu->run;
+
 
 	switch (run->exit_reason) {
 #ifdef KVM_EXIT_SET_TPR
@@ -133,7 +136,7 @@  int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
 #endif
 #ifdef KVM_EXIT_TPR_ACCESS
 		case KVM_EXIT_TPR_ACCESS:
-			r = handle_tpr_access(kvm, run, vcpu);
+			r = handle_tpr_access(vcpu);
 			break;
 #endif
 		default:
@@ -212,12 +215,12 @@  int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
 
 #ifdef KVM_CAP_IRQCHIP
 
-int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
+int kvm_get_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s)
 {
 	int r;
-	if (!kvm->irqchip_in_kernel)
+	if (!kvm_irqchip_in_kernel(vcpu->kvm))
 		return 0;
-	r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_LAPIC, s);
+	r = ioctl(vcpu->fd, KVM_GET_LAPIC, s);
 	if (r == -1) {
 		r = -errno;
 		perror("kvm_get_lapic");
@@ -225,12 +228,12 @@  int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
 	return r;
 }
 
-int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
+int kvm_set_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s)
 {
 	int r;
-	if (!kvm->irqchip_in_kernel)
+	if (!kvm_irqchip_in_kernel(vcpu->kvm))
 		return 0;
-	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_LAPIC, s);
+	r = ioctl(vcpu->fd, KVM_SET_LAPIC, s);
 	if (r == -1) {
 		r = -errno;
 		perror("kvm_set_lapic");
@@ -270,10 +273,10 @@  int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s)
 
 #endif
 
-void kvm_show_code(kvm_context_t kvm, int vcpu)
+void kvm_show_code(kvm_vcpu_context_t vcpu)
 {
 #define SHOW_CODE_LEN 50
-	int fd = kvm->vcpu_fd[vcpu];
+	int fd = vcpu->fd;
 	struct kvm_regs regs;
 	struct kvm_sregs sregs;
 	int r, n;
@@ -281,6 +284,7 @@  void kvm_show_code(kvm_context_t kvm, int vcpu)
 	unsigned char code;
 	char code_str[SHOW_CODE_LEN * 3 + 1];
 	unsigned long rip;
+	kvm_context_t kvm = vcpu->kvm;
 
 	r = ioctl(fd, KVM_GET_SREGS, &sregs);
 	if (r == -1) {
@@ -339,8 +343,7 @@  struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
 	return msrs;
 }
 
-int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
-		 int n)
+int kvm_get_msrs(kvm_vcpu_context_t vcpu, struct kvm_msr_entry *msrs, int n)
 {
     struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
     int r, e;
@@ -351,7 +354,7 @@  int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
     }
     kmsrs->nmsrs = n;
     memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
-    r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_MSRS, kmsrs);
+    r = ioctl(vcpu->fd, KVM_GET_MSRS, kmsrs);
     e = errno;
     memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
     free(kmsrs);
@@ -359,8 +362,7 @@  int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
     return r;
 }
 
-int kvm_set_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
-		 int n)
+int kvm_set_msrs(kvm_vcpu_context_t vcpu, struct kvm_msr_entry *msrs, int n)
 {
     struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
     int r, e;
@@ -371,7 +373,7 @@  int kvm_set_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
     }
     kmsrs->nmsrs = n;
     memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
-    r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_MSRS, kmsrs);
+    r = ioctl(vcpu->fd, KVM_SET_MSRS, kmsrs);
     e = errno;
     free(kmsrs);
     errno = e;
@@ -393,9 +395,9 @@  static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
     	fprintf(stderr, "%s %llx/%x\n", name, dt->base, dt->limit);
 }
 
-void kvm_show_regs(kvm_context_t kvm, int vcpu)
+void kvm_show_regs(kvm_vcpu_context_t vcpu)
 {
-	int fd = kvm->vcpu_fd[vcpu];
+	int fd = vcpu->fd;
 	struct kvm_regs regs;
 	struct kvm_sregs sregs;
 	int r;
@@ -437,26 +439,22 @@  void kvm_show_regs(kvm_context_t kvm, int vcpu)
 		sregs.efer);
 }
 
-uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu)
+uint64_t kvm_get_apic_base(kvm_vcpu_context_t vcpu)
 {
-	struct kvm_run *run = kvm->run[vcpu];
-
-	return run->apic_base;
+	return vcpu->run->apic_base;
 }
 
-void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
+void kvm_set_cr8(kvm_vcpu_context_t vcpu, uint64_t cr8)
 {
-	struct kvm_run *run = kvm->run[vcpu];
-
-	run->cr8 = cr8;
+	vcpu->run->cr8 = cr8;
 }
 
-__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu)
+__u64 kvm_get_cr8(kvm_vcpu_context_t vcpu)
 {
-	return kvm->run[vcpu]->cr8;
+	return vcpu->run->cr8;
 }
 
-int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
+int kvm_setup_cpuid(kvm_vcpu_context_t vcpu, int nent,
 		    struct kvm_cpuid_entry *entries)
 {
 	struct kvm_cpuid *cpuid;
@@ -468,13 +466,13 @@  int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
 
 	cpuid->nent = nent;
 	memcpy(cpuid->entries, entries, nent * sizeof(*entries));
-	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID, cpuid);
+	r = ioctl(vcpu->fd, KVM_SET_CPUID, cpuid);
 
 	free(cpuid);
 	return r;
 }
 
-int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
+int kvm_setup_cpuid2(kvm_vcpu_context_t vcpu, int nent,
 		     struct kvm_cpuid_entry2 *entries)
 {
 	struct kvm_cpuid2 *cpuid;
@@ -486,7 +484,7 @@  int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
 
 	cpuid->nent = nent;
 	memcpy(cpuid->entries, entries, nent * sizeof(*entries));
-	r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID2, cpuid);
+	r = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
 	if (r == -1) {
 		fprintf(stderr, "kvm_setup_cpuid2: %m\n");
 		return -errno;
@@ -531,17 +529,17 @@  int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
 
 #ifdef KVM_CAP_VAPIC
 
-static int tpr_access_reporting(kvm_context_t kvm, int vcpu, int enabled)
+static int tpr_access_reporting(kvm_vcpu_context_t vcpu, int enabled)
 {
 	int r;
 	struct kvm_tpr_access_ctl tac = {
 		.enabled = enabled,
 	};
 
-	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
+	r = ioctl(vcpu->kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
 	if (r == -1 || r == 0)
 		return -ENOSYS;
-	r = ioctl(kvm->vcpu_fd[vcpu], KVM_TPR_ACCESS_REPORTING, &tac);
+	r = ioctl(vcpu->fd, KVM_TPR_ACCESS_REPORTING, &tac);
 	if (r == -1) {
 		r = -errno;
 		perror("KVM_TPR_ACCESS_REPORTING");
@@ -550,14 +548,14 @@  static int tpr_access_reporting(kvm_context_t kvm, int vcpu, int enabled)
 	return 0;
 }
 
-int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
+int kvm_enable_tpr_access_reporting(kvm_vcpu_context_t vcpu)
 {
-	return tpr_access_reporting(kvm, vcpu, 1);
+	return tpr_access_reporting(vcpu, 1);
 }
 
-int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
+int kvm_disable_tpr_access_reporting(kvm_vcpu_context_t vcpu)
 {
-	return tpr_access_reporting(kvm, vcpu, 0);
+	return tpr_access_reporting(vcpu, 0);
 }
 
 #endif
diff --git a/target-ia64/libkvm.c b/target-ia64/libkvm.c
index 48669de..168721e 100644
--- a/target-ia64/libkvm.c
+++ b/target-ia64/libkvm.c
@@ -45,11 +45,11 @@  int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
 	return 0;
 }
 
-int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
+int kvm_arch_run(kvm_vcpu_context_t vcpu)
 {
 	int r = 0;
 
-	switch (run->exit_reason) {
+	switch (vcpu->run->exit_reason) {
 		default:
 			r = 1;
 			break;
@@ -58,12 +58,12 @@  int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
 	return r;
 }
 
-void kvm_show_code(kvm_context_t kvm, int vcpu)
+void kvm_show_code(kvm_vcpu_context_t vcpu)
 {
 	fprintf(stderr, "kvm_show_code not supported yet!\n");
 }
 
-void kvm_show_regs(kvm_context_t kvm, int vcpu)
+void kvm_show_regs(kvm_vcpu_context_t vcpu)
 {
 	fprintf(stderr,"kvm_show_regs not supportted today!\n");
 }
diff --git a/target-ppc/libkvm.c b/target-ppc/libkvm.c
index 2dfff3b..da93026 100644
--- a/target-ppc/libkvm.c
+++ b/target-ppc/libkvm.c
@@ -23,9 +23,11 @@ 
 #include <stdio.h>
 #include <inttypes.h>
 
-int handle_dcr(struct kvm_run *run,  kvm_context_t kvm, int vcpu)
+int handle_dcr(kvm_vcpu_context_t vcpu)
 {
 	int ret = 0;
+	struct kvm_run *run = vcpu->run;
+	kvm_context_t kvm = vcpu->kvm;
 
 	if (run->dcr.is_write)
 		ret = kvm->callbacks->powerpc_dcr_write(vcpu,
@@ -39,17 +41,17 @@  int handle_dcr(struct kvm_run *run,  kvm_context_t kvm, int vcpu)
 	return ret;
 }
 
-void kvm_show_code(kvm_context_t kvm, int vcpu)
+void kvm_show_code(kvm_vcpu_context_t vcpu)
 {
 	fprintf(stderr, "%s: Operation not supported\n", __FUNCTION__);
 }
 
-void kvm_show_regs(kvm_context_t kvm, int vcpu)
+void kvm_show_regs(kvm_vcpu_context_t vcpu)
 {
 	struct kvm_regs regs;
 	int i;
 
-	if (kvm_get_regs(kvm, vcpu, &regs))
+	if (kvm_get_regs(vcpu, &regs))
 		return;
 
 	fprintf(stderr,"guest vcpu #%d\n", vcpu);
@@ -84,13 +86,13 @@  int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
 	return 0;
 }
 
-int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu)
+int kvm_arch_run(kvm_vcpu_context_t vcpu)
 {
 	int ret = 0;
 
-	switch (run->exit_reason){
+	switch (vcpu->run->exit_reason){
 	case KVM_EXIT_DCR:
-		ret = handle_dcr(run, kvm, vcpu);
+		ret = handle_dcr(vcpu);
 		break;
 	default:
 		ret = 1;