
use KVMState, as upstream does

Message ID 1244139783-15787-1-git-send-email-glommer@redhat.com (mailing list archive)
State New, archived

Commit Message

Glauber Costa June 4, 2009, 6:23 p.m. UTC
This is a pretty mechanical change. To make the code look
closer to upstream qemu, I'm renaming kvm_context_t to
KVMState. The mid-term goal here is to start sharing code
wherever possible.
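
The heart of the patch is the typedef in libkvm-all.h: once the struct
itself is named KVMState instead of hiding behind a pointer typedef,
every prototype just spells the pointer explicitly, e.g.:

/* before: the context sits behind a pointer typedef */
struct kvm_context;
typedef struct kvm_context *kvm_context_t;

int kvm_create_vcpu(kvm_context_t kvm, int slot);

/* after: the typedef names the struct, like upstream's KVMState,
 * and callers/prototypes write the pointer themselves */
struct kvm_context;
typedef struct kvm_context KVMState;

int kvm_create_vcpu(KVMState *kvm, int slot);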

Avi, please apply, or I'll send you a video of myself
dancing naked.

Signed-off-by: Glauber Costa <glommer@redhat.com>
---
 kvm/user/main-ppc.c  |    2 +-
 kvm/user/main.c      |    4 +-
 libkvm-all.c         |  164 ++++++++++++++++++++++----------------------
 libkvm-all.h         |  186 +++++++++++++++++++++++++-------------------------
 libkvm-common.h      |   24 +++---
 qemu-kvm-x86.c       |    2 +-
 qemu-kvm.c           |    2 +-
 qemu-kvm.h           |    2 +-
 target-i386/libkvm.c |   62 ++++++++--------
 target-i386/libkvm.h |    2 +-
 target-ia64/libkvm.c |   12 ++--
 target-ppc/libkvm.c  |   10 ++--
 12 files changed, 236 insertions(+), 236 deletions(-)

Comments

Gleb Natapov June 4, 2009, 7:23 p.m. UTC | #1
On Thu, Jun 04, 2009 at 02:23:03PM -0400, Glauber Costa wrote:
> This is a pretty mechanical change. To make the code look
> closer to upstream qemu, I'm renaming kvm_context_t to
> KVMState. The mid-term goal here is to start sharing code
> wherever possible.
> 
> Avi, please apply, or I'll send you a video of myself
> dancing naked.
> 
You can start recording it, since I doubt this patch will apply cleanly
to today's master (another mechanical change was applied). Regardless, I
think trying to use bits of upstream qemu's kvm code is dangerous. It has
similar functions with the same names, but with different assumptions
about the conditions they can be executed in (look at commit a5ddb119). I
actually prefer to be different enough that we don't call an upstream
qemu function by mistake.
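
To make the hazard concrete, here is a made-up sketch (not the code
touched by a5ddb119; kvm_flush_pending is an invented name, only
qemu_mutex comes from qemu-kvm): the two trees can carry a function with
the same name whose locking assumptions differ, so picking up the wrong
one by accident either touches shared state unlocked or deadlocks.

#include <pthread.h>

static pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pending;

#ifdef UPSTREAM_QEMU
/* upstream-style variant: assumes the caller already holds qemu_mutex */
static void kvm_flush_pending(void)
{
    pending = 0;
}
#else
/* qemu-kvm-style variant: same name, but it takes the lock itself */
static void kvm_flush_pending(void)
{
    pthread_mutex_lock(&qemu_mutex);
    pending = 0;
    pthread_mutex_unlock(&qemu_mutex);
}
#endif

int main(void)
{
    /* a caller written for the qemu-kvm convention */
    kvm_flush_pending();

    /* Built against the upstream convention instead, the same call would
     * update 'pending' without the lock; and a caller that already holds
     * qemu_mutex, as upstream expects, would deadlock on the qemu-kvm
     * variant above. */
    return 0;
}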

> Signed-off-by: Glauber Costa <glommer@redhat.com>
> ---
>  kvm/user/main-ppc.c  |    2 +-
>  kvm/user/main.c      |    4 +-
>  libkvm-all.c         |  164 ++++++++++++++++++++++----------------------
>  libkvm-all.h         |  186 +++++++++++++++++++++++++-------------------------
>  libkvm-common.h      |   24 +++---
>  qemu-kvm-x86.c       |    2 +-
>  qemu-kvm.c           |    2 +-
>  qemu-kvm.h           |    2 +-
>  target-i386/libkvm.c |   62 ++++++++--------
>  target-i386/libkvm.h |    2 +-
>  target-ia64/libkvm.c |   12 ++--
>  target-ppc/libkvm.c  |   10 ++--
>  12 files changed, 236 insertions(+), 236 deletions(-)
> 
> diff --git a/kvm/user/main-ppc.c b/kvm/user/main-ppc.c
> index 5af59f8..fbfd721 100644
> --- a/kvm/user/main-ppc.c
> +++ b/kvm/user/main-ppc.c
> @@ -44,7 +44,7 @@ static int gettid(void)
>  	return syscall(__NR_gettid);
>  }
>  
> -kvm_context_t kvm;
> +KVMState *kvm;
>  
>  #define IPI_SIGNAL (SIGRTMIN + 4)
>  
> diff --git a/kvm/user/main.c b/kvm/user/main.c
> index 1530ae2..ecd1b28 100644
> --- a/kvm/user/main.c
> +++ b/kvm/user/main.c
> @@ -50,7 +50,7 @@ static int tkill(int pid, int sig)
>  	return syscall(__NR_tkill, pid, sig);
>  }
>  
> -kvm_context_t kvm;
> +KVMState *kvm;
>  
>  #define MAX_VCPUS 4
>  
> @@ -406,7 +406,7 @@ static void load_file(void *mem, const char *fname)
>  	}
>  }
>  
> -static void enter_32(kvm_context_t kvm)
> +static void enter_32(KVMState *kvm)
>  {
>  	struct kvm_regs regs = {
>  		.rsp = 0x80000,  /* 512KB */
> diff --git a/libkvm-all.c b/libkvm-all.c
> index 1668e32..30b5a6c 100644
> --- a/libkvm-all.c
> +++ b/libkvm-all.c
> @@ -53,7 +53,7 @@
>  int kvm_abi = EXPECTED_KVM_API_VERSION;
>  int kvm_page_size;
>  
> -static inline void set_gsi(kvm_context_t kvm, unsigned int gsi)
> +static inline void set_gsi(KVMState *kvm, unsigned int gsi)
>  {
>  	uint32_t *bitmap = kvm->used_gsi_bitmap;
>  
> @@ -63,7 +63,7 @@ static inline void set_gsi(kvm_context_t kvm, unsigned int gsi)
>  		DPRINTF("Invalid GSI %d\n");
>  }
>  
> -static inline void clear_gsi(kvm_context_t kvm, unsigned int gsi)
> +static inline void clear_gsi(KVMState *kvm, unsigned int gsi)
>  {
>  	uint32_t *bitmap = kvm->used_gsi_bitmap;
>  
> @@ -91,7 +91,7 @@ static void init_slots(void)
>  		slots[i].len = 0;
>  }
>  
> -static int get_free_slot(kvm_context_t kvm)
> +static int get_free_slot(KVMState *kvm)
>  {
>  	int i;
>  	int tss_ext;
> @@ -158,7 +158,7 @@ static int get_container_slot(uint64_t phys_addr, unsigned long size)
>  	return -1;
>  }
>  
> -int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr, unsigned long size)
> +int kvm_is_containing_region(KVMState *kvm, unsigned long phys_addr, unsigned long size)
>  {
>  	int slot = get_container_slot(phys_addr, size);
>  	if (slot == -1)
> @@ -169,7 +169,7 @@ int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr, unsigne
>  /* 
>   * dirty pages logging control 
>   */
> -static int kvm_dirty_pages_log_change(kvm_context_t kvm,
> +static int kvm_dirty_pages_log_change(KVMState *kvm,
>  				      unsigned long phys_addr,
>  				      unsigned flags,
>  				      unsigned mask)
> @@ -209,8 +209,8 @@ static int kvm_dirty_pages_log_change(kvm_context_t kvm,
>  	return r;
>  }
>  
> -static int kvm_dirty_pages_log_change_all(kvm_context_t kvm,
> -					  int (*change)(kvm_context_t kvm,
> +static int kvm_dirty_pages_log_change_all(KVMState *kvm,
> +					  int (*change)(KVMState *kvm,
>  							uint64_t start,
>  							uint64_t len))
>  {
> @@ -223,7 +223,7 @@ static int kvm_dirty_pages_log_change_all(kvm_context_t kvm,
>  	return r;
>  }
>  
> -int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm,
> +int kvm_dirty_pages_log_enable_slot(KVMState *kvm,
>  				    uint64_t phys_addr,
>  				    uint64_t len)
>  {
> @@ -243,7 +243,7 @@ int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm,
>  					  KVM_MEM_LOG_DIRTY_PAGES);
>  }
>  
> -int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
> +int kvm_dirty_pages_log_disable_slot(KVMState *kvm,
>  				     uint64_t phys_addr,
>  				     uint64_t len)
>  {
> @@ -265,7 +265,7 @@ int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
>  /**
>   * Enable dirty page logging for all memory regions
>   */
> -int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
> +int kvm_dirty_pages_log_enable_all(KVMState *kvm)
>  {
>  	if (kvm->dirty_pages_log_all)
>  		return 0;
> @@ -278,7 +278,7 @@ int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
>   * Enable dirty page logging only for memory regions that were created with
>   *     dirty logging enabled (disable for all other memory regions).
>   */
> -int kvm_dirty_pages_log_reset(kvm_context_t kvm)
> +int kvm_dirty_pages_log_reset(KVMState *kvm)
>  {
>  	if (!kvm->dirty_pages_log_all)
>  		return 0;
> @@ -288,11 +288,11 @@ int kvm_dirty_pages_log_reset(kvm_context_t kvm)
>  }
>  
>  
> -kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
> +KVMState *kvm_init(struct kvm_callbacks *callbacks,
>  		       void *opaque)
>  {
>  	int fd;
> -	kvm_context_t kvm;
> +	KVMState *kvm;
>  	int r, gsi_count;
>  
>  	fd = open("/dev/kvm", O_RDWR);
> @@ -354,7 +354,7 @@ kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
>  	return NULL;
>  }
>  
> -void kvm_finalize(kvm_context_t kvm)
> +void kvm_finalize(KVMState *kvm)
>  {
>      	if (kvm->vcpu_fd[0] != -1)
>  		close(kvm->vcpu_fd[0]);
> @@ -364,17 +364,17 @@ void kvm_finalize(kvm_context_t kvm)
>  	free(kvm);
>  }
>  
> -void kvm_disable_irqchip_creation(kvm_context_t kvm)
> +void kvm_disable_irqchip_creation(KVMState *kvm)
>  {
>  	kvm->no_irqchip_creation = 1;
>  }
>  
> -void kvm_disable_pit_creation(kvm_context_t kvm)
> +void kvm_disable_pit_creation(KVMState *kvm)
>  {
>  	kvm->no_pit_creation = 1;
>  }
>  
> -int kvm_create_vcpu(kvm_context_t kvm, int slot)
> +int kvm_create_vcpu(KVMState *kvm, int slot)
>  {
>  	long mmap_size;
>  	int r;
> @@ -402,7 +402,7 @@ int kvm_create_vcpu(kvm_context_t kvm, int slot)
>  	return 0;
>  }
>  
> -int kvm_create_vm(kvm_context_t kvm)
> +int kvm_create_vm(KVMState *kvm)
>  {
>  	int fd = kvm->fd;
>  
> @@ -425,7 +425,7 @@ int kvm_create_vm(kvm_context_t kvm)
>  	return 0;
>  }
>  
> -static int kvm_create_default_phys_mem(kvm_context_t kvm,
> +static int kvm_create_default_phys_mem(KVMState *kvm,
>  				       unsigned long phys_mem_bytes,
>  				       void **vm_mem)
>  {
> @@ -440,7 +440,7 @@ static int kvm_create_default_phys_mem(kvm_context_t kvm,
>  	return -1;
>  }
>  
> -int kvm_check_extension(kvm_context_t kvm, int ext)
> +int kvm_check_extension(KVMState *kvm, int ext)
>  {
>  	int ret;
>  
> @@ -450,7 +450,7 @@ int kvm_check_extension(kvm_context_t kvm, int ext)
>  	return 0;
>  }
>  
> -void kvm_create_irqchip(kvm_context_t kvm)
> +void kvm_create_irqchip(KVMState *kvm)
>  {
>  	int r;
>  
> @@ -477,7 +477,7 @@ void kvm_create_irqchip(kvm_context_t kvm)
>  #endif
>  }
>  
> -int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
> +int kvm_create(KVMState *kvm, unsigned long phys_mem_bytes, void **vm_mem)
>  {
>  	int r;
>  	
> @@ -497,7 +497,7 @@ int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
>  }
>  
>  
> -void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
> +void *kvm_create_phys_mem(KVMState *kvm, unsigned long phys_start,
>  			  unsigned long len, int log, int writable)
>  {
>  	int r;
> @@ -543,7 +543,7 @@ void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
>          return ptr;
>  }
>  
> -int kvm_register_phys_mem(kvm_context_t kvm,
> +int kvm_register_phys_mem(KVMState *kvm,
>  			  unsigned long phys_start, void *userspace_addr,
>  			  unsigned long len, int log)
>  {
> @@ -574,7 +574,7 @@ int kvm_register_phys_mem(kvm_context_t kvm,
>  /* destroy/free a whole slot.
>   * phys_start, len and slot are the params passed to kvm_create_phys_mem()
>   */
> -void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start, 
> +void kvm_destroy_phys_mem(KVMState *kvm, unsigned long phys_start, 
>  			  unsigned long len)
>  {
>  	int slot;
> @@ -616,7 +616,7 @@ void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
>  	free_slot(memory.slot);
>  }
>  
> -void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr, unsigned long size)
> +void kvm_unregister_memory_area(KVMState *kvm, uint64_t phys_addr, unsigned long size)
>  {
>  
>  	int slot = get_container_slot(phys_addr, size);
> @@ -628,7 +628,7 @@ void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr, unsigned
>  	}
>  }
>  
> -static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
> +static int kvm_get_map(KVMState *kvm, int ioctl_num, int slot, void *buf)
>  {
>  	int r;
>  	struct kvm_dirty_log log = {
> @@ -643,7 +643,7 @@ static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
>  	return 0;
>  }
>  
> -int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
> +int kvm_get_dirty_pages(KVMState *kvm, unsigned long phys_addr, void *buf)
>  {
>  	int slot;
>  
> @@ -651,7 +651,7 @@ int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
>  	return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
>  }
>  
> -int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
> +int kvm_get_dirty_pages_range(KVMState *kvm, unsigned long phys_addr,
>  			      unsigned long len, void *buf, void *opaque,
>  			      int (*cb)(unsigned long start, unsigned long len,
>  					void*bitmap, void *opaque))
> @@ -676,7 +676,7 @@ int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
>  
>  #ifdef KVM_CAP_IRQCHIP
>  
> -int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status)
> +int kvm_set_irq_level(KVMState *kvm, int irq, int level, int *status)
>  {
>  	struct kvm_irq_level event;
>  	int r;
> @@ -701,7 +701,7 @@ int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status)
>  	return 1;
>  }
>  
> -int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
> +int kvm_get_irqchip(KVMState *kvm, struct kvm_irqchip *chip)
>  {
>  	int r;
>  
> @@ -715,7 +715,7 @@ int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
>  	return r;
>  }
>  
> -int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
> +int kvm_set_irqchip(KVMState *kvm, struct kvm_irqchip *chip)
>  {
>  	int r;
>  
> @@ -731,7 +731,7 @@ int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
>  
>  #endif
>  
> -static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
> +static int handle_io(KVMState *kvm, struct kvm_run *run, int vcpu)
>  {
>  	uint16_t addr = run->io.port;
>  	int r;
> @@ -786,7 +786,7 @@ static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
>  	return 0;
>  }
>  
> -int handle_debug(kvm_context_t kvm, int vcpu, void *env)
> +int handle_debug(KVMState *kvm, int vcpu, void *env)
>  {
>  #ifdef KVM_CAP_SET_GUEST_DEBUG
>      struct kvm_run *run = kvm->run[vcpu];
> @@ -797,38 +797,38 @@ int handle_debug(kvm_context_t kvm, int vcpu, void *env)
>  #endif
>  }
>  
> -int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
> +int kvm_get_regs(KVMState *kvm, int vcpu, struct kvm_regs *regs)
>  {
>      return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs);
>  }
>  
> -int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
> +int kvm_set_regs(KVMState *kvm, int vcpu, struct kvm_regs *regs)
>  {
>      return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_REGS, regs);
>  }
>  
> -int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
> +int kvm_get_fpu(KVMState *kvm, int vcpu, struct kvm_fpu *fpu)
>  {
>      return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_FPU, fpu);
>  }
>  
> -int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
> +int kvm_set_fpu(KVMState *kvm, int vcpu, struct kvm_fpu *fpu)
>  {
>      return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_FPU, fpu);
>  }
>  
> -int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
> +int kvm_get_sregs(KVMState *kvm, int vcpu, struct kvm_sregs *sregs)
>  {
>      return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_SREGS, sregs);
>  }
>  
> -int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
> +int kvm_set_sregs(KVMState *kvm, int vcpu, struct kvm_sregs *sregs)
>  {
>      return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SREGS, sregs);
>  }
>  
>  #ifdef KVM_CAP_MP_STATE
> -int kvm_get_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
> +int kvm_get_mpstate(KVMState *kvm, int vcpu, struct kvm_mp_state *mp_state)
>  {
>      int r;
>  
> @@ -838,7 +838,7 @@ int kvm_get_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
>      return -ENOSYS;
>  }
>  
> -int kvm_set_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
> +int kvm_set_mpstate(KVMState *kvm, int vcpu, struct kvm_mp_state *mp_state)
>  {
>      int r;
>  
> @@ -849,7 +849,7 @@ int kvm_set_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
>  }
>  #endif
>  
> -static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
> +static int handle_mmio(KVMState *kvm, struct kvm_run *kvm_run)
>  {
>  	unsigned long addr = kvm_run->mmio.phys_addr;
>  	void *data = kvm_run->mmio.data;
> @@ -866,58 +866,58 @@ static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
>  					kvm_run->mmio.len);
>  }
>  
> -int handle_io_window(kvm_context_t kvm)
> +int handle_io_window(KVMState *kvm)
>  {
>  	return kvm->callbacks->io_window(kvm->opaque);
>  }
>  
> -int handle_halt(kvm_context_t kvm, int vcpu)
> +int handle_halt(KVMState *kvm, int vcpu)
>  {
>  	return kvm->callbacks->halt(kvm->opaque, vcpu);
>  }
>  
> -int handle_shutdown(kvm_context_t kvm, void *env)
> +int handle_shutdown(KVMState *kvm, void *env)
>  {
>  	return kvm->callbacks->shutdown(kvm->opaque, env);
>  }
>  
> -int try_push_interrupts(kvm_context_t kvm)
> +int try_push_interrupts(KVMState *kvm)
>  {
>  	return kvm->callbacks->try_push_interrupts(kvm->opaque);
>  }
>  
> -static inline void push_nmi(kvm_context_t kvm)
> +static inline void push_nmi(KVMState *kvm)
>  {
>  #ifdef KVM_CAP_USER_NMI
>  	kvm->callbacks->push_nmi(kvm->opaque);
>  #endif /* KVM_CAP_USER_NMI */
>  }
>  
> -void post_kvm_run(kvm_context_t kvm, void *env)
> +void post_kvm_run(KVMState *kvm, void *env)
>  {
>  	kvm->callbacks->post_kvm_run(kvm->opaque, env);
>  }
>  
> -int pre_kvm_run(kvm_context_t kvm, void *env)
> +int pre_kvm_run(KVMState *kvm, void *env)
>  {
>  	return kvm->callbacks->pre_kvm_run(kvm->opaque, env);
>  }
>  
> -int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
> +int kvm_get_interrupt_flag(KVMState *kvm, int vcpu)
>  {
>  	struct kvm_run *run = kvm->run[vcpu];
>  
>  	return run->if_flag;
>  }
>  
> -int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
> +int kvm_is_ready_for_interrupt_injection(KVMState *kvm, int vcpu)
>  {
>  	struct kvm_run *run = kvm->run[vcpu];
>  
>  	return run->ready_for_interrupt_injection;
>  }
>  
> -int kvm_run(kvm_context_t kvm, int vcpu, void *env)
> +int kvm_run(KVMState *kvm, int vcpu, void *env)
>  {
>  	int r;
>  	int fd = kvm->vcpu_fd[vcpu];
> @@ -1029,7 +1029,7 @@ more:
>  	return r;
>  }
>  
> -int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
> +int kvm_inject_irq(KVMState *kvm, int vcpu, unsigned irq)
>  {
>  	struct kvm_interrupt intr;
>  
> @@ -1038,13 +1038,13 @@ int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
>  }
>  
>  #ifdef KVM_CAP_SET_GUEST_DEBUG
> -int kvm_set_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_guest_debug *dbg)
> +int kvm_set_guest_debug(KVMState *kvm, int vcpu, struct kvm_guest_debug *dbg)
>  {
>  	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_GUEST_DEBUG, dbg);
>  }
>  #endif
>  
> -int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
> +int kvm_set_signal_mask(KVMState *kvm, int vcpu, const sigset_t *sigset)
>  {
>  	struct kvm_signal_mask *sigmask;
>  	int r;
> @@ -1068,17 +1068,17 @@ int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
>  	return r;
>  }
>  
> -int kvm_irqchip_in_kernel(kvm_context_t kvm)
> +int kvm_irqchip_in_kernel(KVMState *kvm)
>  {
>  	return kvm->irqchip_in_kernel;
>  }
>  
> -int kvm_pit_in_kernel(kvm_context_t kvm)
> +int kvm_pit_in_kernel(KVMState *kvm)
>  {
>  	return kvm->pit_in_kernel;
>  }
>  
> -int kvm_has_sync_mmu(kvm_context_t kvm)
> +int kvm_has_sync_mmu(KVMState *kvm)
>  {
>          int r = 0;
>  #ifdef KVM_CAP_SYNC_MMU
> @@ -1087,7 +1087,7 @@ int kvm_has_sync_mmu(kvm_context_t kvm)
>          return r;
>  }
>  
> -int kvm_inject_nmi(kvm_context_t kvm, int vcpu)
> +int kvm_inject_nmi(KVMState *kvm, int vcpu)
>  {
>  #ifdef KVM_CAP_USER_NMI
>  	return ioctl(kvm->vcpu_fd[vcpu], KVM_NMI);
> @@ -1096,7 +1096,7 @@ int kvm_inject_nmi(kvm_context_t kvm, int vcpu)
>  #endif
>  }
>  
> -int kvm_init_coalesced_mmio(kvm_context_t kvm)
> +int kvm_init_coalesced_mmio(KVMState *kvm)
>  {
>  	int r = 0;
>  	kvm->coalesced_mmio = 0;
> @@ -1110,7 +1110,7 @@ int kvm_init_coalesced_mmio(kvm_context_t kvm)
>  	return r;
>  }
>  
> -int kvm_register_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t size)
> +int kvm_register_coalesced_mmio(KVMState *kvm, uint64_t addr, uint32_t size)
>  {
>  #ifdef KVM_CAP_COALESCED_MMIO
>  	struct kvm_coalesced_mmio_zone zone;
> @@ -1132,7 +1132,7 @@ int kvm_register_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t size)
>  	return -ENOSYS;
>  }
>  
> -int kvm_unregister_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t size)
> +int kvm_unregister_coalesced_mmio(KVMState *kvm, uint64_t addr, uint32_t size)
>  {
>  #ifdef KVM_CAP_COALESCED_MMIO
>  	struct kvm_coalesced_mmio_zone zone;
> @@ -1156,7 +1156,7 @@ int kvm_unregister_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t siz
>  }
>  
>  #ifdef KVM_CAP_DEVICE_ASSIGNMENT
> -int kvm_assign_pci_device(kvm_context_t kvm,
> +int kvm_assign_pci_device(KVMState *kvm,
>  			  struct kvm_assigned_pci_dev *assigned_dev)
>  {
>  	int ret;
> @@ -1168,7 +1168,7 @@ int kvm_assign_pci_device(kvm_context_t kvm,
>  	return ret;
>  }
>  
> -static int kvm_old_assign_irq(kvm_context_t kvm,
> +static int kvm_old_assign_irq(KVMState *kvm,
>  		   struct kvm_assigned_irq *assigned_irq)
>  {
>  	int ret;
> @@ -1181,7 +1181,7 @@ static int kvm_old_assign_irq(kvm_context_t kvm,
>  }
>  
>  #ifdef KVM_CAP_ASSIGN_DEV_IRQ
> -int kvm_assign_irq(kvm_context_t kvm,
> +int kvm_assign_irq(KVMState *kvm,
>  		   struct kvm_assigned_irq *assigned_irq)
>  {
>  	int ret;
> @@ -1197,7 +1197,7 @@ int kvm_assign_irq(kvm_context_t kvm,
>  	return kvm_old_assign_irq(kvm, assigned_irq);
>  }
>  
> -int kvm_deassign_irq(kvm_context_t kvm,
> +int kvm_deassign_irq(KVMState *kvm,
>  		     struct kvm_assigned_irq *assigned_irq)
>  {
>  	int ret;
> @@ -1209,7 +1209,7 @@ int kvm_deassign_irq(kvm_context_t kvm,
>  	return ret;
>  }
>  #else
> -int kvm_assign_irq(kvm_context_t kvm,
> +int kvm_assign_irq(KVMState *kvm,
>  		   struct kvm_assigned_irq *assigned_irq)
>  {
>  	return kvm_old_assign_irq(kvm, assigned_irq);
> @@ -1218,7 +1218,7 @@ int kvm_assign_irq(kvm_context_t kvm,
>  #endif
>  
>  #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
> -int kvm_deassign_pci_device(kvm_context_t kvm,
> +int kvm_deassign_pci_device(KVMState *kvm,
>  			    struct kvm_assigned_pci_dev *assigned_dev)
>  {
>  	int ret;
> @@ -1231,7 +1231,7 @@ int kvm_deassign_pci_device(kvm_context_t kvm,
>  }
>  #endif
>  
> -int kvm_destroy_memory_region_works(kvm_context_t kvm)
> +int kvm_destroy_memory_region_works(KVMState *kvm)
>  {
>  	int ret = 0;
>  
> @@ -1244,7 +1244,7 @@ int kvm_destroy_memory_region_works(kvm_context_t kvm)
>  	return ret;
>  }
>  
> -int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
> +int kvm_reinject_control(KVMState *kvm, int pit_reinject)
>  {
>  #ifdef KVM_CAP_REINJECT_CONTROL
>  	int r;
> @@ -1263,7 +1263,7 @@ int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
>  	return -ENOSYS;
>  }
>  
> -int kvm_has_gsi_routing(kvm_context_t kvm)
> +int kvm_has_gsi_routing(KVMState *kvm)
>  {
>      int r = 0;
>  
> @@ -1273,7 +1273,7 @@ int kvm_has_gsi_routing(kvm_context_t kvm)
>      return r;
>  }
>  
> -int kvm_get_gsi_count(kvm_context_t kvm)
> +int kvm_get_gsi_count(KVMState *kvm)
>  {
>  #ifdef KVM_CAP_IRQ_ROUTING
>  	return kvm_check_extension(kvm, KVM_CAP_IRQ_ROUTING);
> @@ -1282,7 +1282,7 @@ int kvm_get_gsi_count(kvm_context_t kvm)
>  #endif
>  }
>  
> -int kvm_clear_gsi_routes(kvm_context_t kvm)
> +int kvm_clear_gsi_routes(KVMState *kvm)
>  {
>  #ifdef KVM_CAP_IRQ_ROUTING
>  	kvm->irq_routes->nr = 0;
> @@ -1292,7 +1292,7 @@ int kvm_clear_gsi_routes(kvm_context_t kvm)
>  #endif
>  }
>  
> -int kvm_add_routing_entry(kvm_context_t kvm,
> +int kvm_add_routing_entry(KVMState *kvm,
>  		          struct kvm_irq_routing_entry* entry)
>  {
>  #ifdef KVM_CAP_IRQ_ROUTING
> @@ -1328,7 +1328,7 @@ int kvm_add_routing_entry(kvm_context_t kvm,
>  #endif
>  }
>  
> -int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
> +int kvm_add_irq_route(KVMState *kvm, int gsi, int irqchip, int pin)
>  {
>  #ifdef KVM_CAP_IRQ_ROUTING
>  	struct kvm_irq_routing_entry e;
> @@ -1344,7 +1344,7 @@ int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
>  #endif
>  }
>  
> -int kvm_del_routing_entry(kvm_context_t kvm,
> +int kvm_del_routing_entry(KVMState *kvm,
>  	                  struct kvm_irq_routing_entry* entry)
>  {
>  #ifdef KVM_CAP_IRQ_ROUTING
> @@ -1408,7 +1408,7 @@ int kvm_del_routing_entry(kvm_context_t kvm,
>  #endif
>  }
>  
> -int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
> +int kvm_del_irq_route(KVMState *kvm, int gsi, int irqchip, int pin)
>  {
>  #ifdef KVM_CAP_IRQ_ROUTING
>  	struct kvm_irq_routing_entry e;
> @@ -1424,7 +1424,7 @@ int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
>  #endif
>  }
>  
> -int kvm_commit_irq_routes(kvm_context_t kvm)
> +int kvm_commit_irq_routes(KVMState *kvm)
>  {
>  #ifdef KVM_CAP_IRQ_ROUTING
>  	int r;
> @@ -1439,7 +1439,7 @@ int kvm_commit_irq_routes(kvm_context_t kvm)
>  #endif
>  }
>  
> -int kvm_get_irq_route_gsi(kvm_context_t kvm)
> +int kvm_get_irq_route_gsi(KVMState *kvm)
>  {
>  	int i, bit;
>  	uint32_t *buf = kvm->used_gsi_bitmap;
> @@ -1457,7 +1457,7 @@ int kvm_get_irq_route_gsi(kvm_context_t kvm)
>  }
>  
>  #ifdef KVM_CAP_DEVICE_MSIX
> -int kvm_assign_set_msix_nr(kvm_context_t kvm,
> +int kvm_assign_set_msix_nr(KVMState *kvm,
>                             struct kvm_assigned_msix_nr *msix_nr)
>  {
>          int ret;
> @@ -1469,7 +1469,7 @@ int kvm_assign_set_msix_nr(kvm_context_t kvm,
>          return ret;
>  }
>  
> -int kvm_assign_set_msix_entry(kvm_context_t kvm,
> +int kvm_assign_set_msix_entry(KVMState *kvm,
>                                struct kvm_assigned_msix_entry *entry)
>  {
>          int ret;
> diff --git a/libkvm-all.h b/libkvm-all.h
> index 4821a1e..d4ae12f 100644
> --- a/libkvm-all.h
> +++ b/libkvm-all.h
> @@ -21,12 +21,12 @@
>  
>  struct kvm_context;
>  
> -typedef struct kvm_context *kvm_context_t;
> +typedef struct kvm_context KVMState;
>  
>  #if defined(__x86_64__) || defined(__i386__)
> -struct kvm_msr_list *kvm_get_msr_list(kvm_context_t);
> -int kvm_get_msrs(kvm_context_t, int vcpu, struct kvm_msr_entry *msrs, int n);
> -int kvm_set_msrs(kvm_context_t, int vcpu, struct kvm_msr_entry *msrs, int n);
> +struct kvm_msr_list *kvm_get_msr_list(KVMState *);
> +int kvm_get_msrs(KVMState *, int vcpu, struct kvm_msr_entry *msrs, int n);
> +int kvm_set_msrs(KVMState *, int vcpu, struct kvm_msr_entry *msrs, int n);
>  #endif
>  
>  /*!
> @@ -80,9 +80,9 @@ struct kvm_callbacks {
>      int (*powerpc_dcr_write)(int vcpu, uint32_t dcrn, uint32_t data);
>  #endif
>  #if defined(__s390__)
> -    int (*s390_handle_intercept)(kvm_context_t context, int vcpu,
> +    int (*s390_handle_intercept)(KVMState *context, int vcpu,
>  	struct kvm_run *run);
> -    int (*s390_handle_reset)(kvm_context_t context, int vcpu,
> +    int (*s390_handle_reset)(KVMState *context, int vcpu,
>  	 struct kvm_run *run);
>  #endif
>  };
> @@ -98,7 +98,7 @@ struct kvm_callbacks {
>   * \param opaque Not used
>   * \return NULL on failure
>   */
> -kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
> +KVMState *kvm_init(struct kvm_callbacks *callbacks,
>  		       void *opaque);
>  
>  /*!
> @@ -110,7 +110,7 @@ kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
>   *
>   * \param kvm Pointer to the kvm_context that is to be freed
>   */
> -void kvm_finalize(kvm_context_t kvm);
> +void kvm_finalize(KVMState *kvm);
>  
>  /*!
>   * \brief Disable the in-kernel IRQCHIP creation
> @@ -120,7 +120,7 @@ void kvm_finalize(kvm_context_t kvm);
>   *
>   * \param kvm Pointer to the kvm_context
>   */
> -void kvm_disable_irqchip_creation(kvm_context_t kvm);
> +void kvm_disable_irqchip_creation(KVMState *kvm);
>  
>  /*!
>   * \brief Disable the in-kernel PIT creation
> @@ -130,7 +130,7 @@ void kvm_disable_irqchip_creation(kvm_context_t kvm);
>   *
>   *  \param kvm Pointer to the kvm_context
>   */
> -void kvm_disable_pit_creation(kvm_context_t kvm);
> +void kvm_disable_pit_creation(KVMState *kvm);
>  
>  /*!
>   * \brief Create new virtual machine
> @@ -146,12 +146,12 @@ void kvm_disable_pit_creation(kvm_context_t kvm);
>   * kvm_create allocates for physical RAM
>   * \return 0 on success
>   */
> -int kvm_create(kvm_context_t kvm,
> +int kvm_create(KVMState *kvm,
>  	       unsigned long phys_mem_bytes,
>  	       void **phys_mem);
> -int kvm_create_vm(kvm_context_t kvm);
> -int kvm_check_extension(kvm_context_t kvm, int ext);
> -void kvm_create_irqchip(kvm_context_t kvm);
> +int kvm_create_vm(KVMState *kvm);
> +int kvm_check_extension(KVMState *kvm, int ext);
> +void kvm_create_irqchip(KVMState *kvm);
>  
>  /*!
>   * \brief Create a new virtual cpu
> @@ -163,7 +163,7 @@ void kvm_create_irqchip(kvm_context_t kvm);
>   * \param slot vcpu number (> 0)
>   * \return 0 on success, -errno on failure
>   */
> -int kvm_create_vcpu(kvm_context_t kvm, int slot);
> +int kvm_create_vcpu(KVMState *kvm, int slot);
>  
>  /*!
>   * \brief Start the VCPU
> @@ -186,7 +186,7 @@ int kvm_create_vcpu(kvm_context_t kvm, int slot);
>   * return except for when an error has occured, or when you have sent it
>   * an EINTR signal.
>   */
> -int kvm_run(kvm_context_t kvm, int vcpu, void *env);
> +int kvm_run(KVMState *kvm, int vcpu, void *env);
>  
>  /*!
>   * \brief Get interrupt flag from on last exit to userspace
> @@ -197,7 +197,7 @@ int kvm_run(kvm_context_t kvm, int vcpu, void *env);
>   * \param vcpu Which virtual CPU should get dumped
>   * \return interrupt flag value (0 or 1)
>   */
> -int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu);
> +int kvm_get_interrupt_flag(KVMState *kvm, int vcpu);
>  
>  /*!
>   * \brief Get the value of the APIC_BASE msr as of last exit to userspace
> @@ -208,7 +208,7 @@ int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu);
>   * \param vcpu Which virtual CPU should get dumped
>   * \return APIC_BASE msr contents
>   */
> -uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu);
> +uint64_t kvm_get_apic_base(KVMState *kvm, int vcpu);
>  
>  /*!
>   * \brief Check if a vcpu is ready for interrupt injection
> @@ -219,7 +219,7 @@ uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu);
>   * \param vcpu Which virtual CPU should get dumped
>   * \return boolean indicating interrupt injection readiness
>   */
> -int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu);
> +int kvm_is_ready_for_interrupt_injection(KVMState *kvm, int vcpu);
>  
>  /*!
>   * \brief Read VCPU registers
> @@ -236,7 +236,7 @@ int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu);
>   * registers values
>   * \return 0 on success
>   */
> -int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
> +int kvm_get_regs(KVMState *kvm, int vcpu, struct kvm_regs *regs);
>  
>  /*!
>   * \brief Write VCPU registers
> @@ -251,7 +251,7 @@ int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
>   * registers values
>   * \return 0 on success
>   */
> -int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
> +int kvm_set_regs(KVMState *kvm, int vcpu, struct kvm_regs *regs);
>  /*!
>   * \brief Read VCPU fpu registers
>   *
> @@ -267,7 +267,7 @@ int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
>   * fpu registers values
>   * \return 0 on success
>   */
> -int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
> +int kvm_get_fpu(KVMState *kvm, int vcpu, struct kvm_fpu *fpu);
>  
>  /*!
>   * \brief Write VCPU fpu registers
> @@ -281,7 +281,7 @@ int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
>   * \param fpu Pointer to a kvm_fpu which holds the new vcpu fpu state
>   * \return 0 on success
>   */
> -int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
> +int kvm_set_fpu(KVMState *kvm, int vcpu, struct kvm_fpu *fpu);
>  
>  /*!
>   * \brief Read VCPU system registers
> @@ -299,7 +299,7 @@ int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
>   * registers values
>   * \return 0 on success
>   */
> -int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
> +int kvm_get_sregs(KVMState *kvm, int vcpu, struct kvm_sregs *regs);
>  
>  /*!
>   * \brief Write VCPU system registers
> @@ -314,27 +314,27 @@ int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
>   * registers values
>   * \return 0 on success
>   */
> -int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
> +int kvm_set_sregs(KVMState *kvm, int vcpu, struct kvm_sregs *regs);
>  
>  #ifdef KVM_CAP_MP_STATE
>  /*!
>   *  * \brief Read VCPU MP state
>   *
>   */
> -int kvm_get_mpstate(kvm_context_t kvm, int vcpu,
> +int kvm_get_mpstate(KVMState *kvm, int vcpu,
>                      struct kvm_mp_state *mp_state);
>  
>  /*!
>   *  * \brief Write VCPU MP state
>   *
>   */
> -int kvm_set_mpstate(kvm_context_t kvm, int vcpu,
> +int kvm_set_mpstate(KVMState *kvm, int vcpu,
>                      struct kvm_mp_state *mp_state);
>  /*!
>   *  * \brief Reset VCPU MP state
>   *
>   */
> -static inline int kvm_reset_mpstate(kvm_context_t kvm, int vcpu)
> +static inline int kvm_reset_mpstate(KVMState *kvm, int vcpu)
>  {
>      struct kvm_mp_state mp_state = {.mp_state = KVM_MP_STATE_UNINITIALIZED};
>      return kvm_set_mpstate(kvm, vcpu, &mp_state);
> @@ -351,10 +351,10 @@ static inline int kvm_reset_mpstate(kvm_context_t kvm, int vcpu)
>   * \param irq Vector number
>   * \return 0 on success
>   */
> -int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq);
> +int kvm_inject_irq(KVMState *kvm, int vcpu, unsigned irq);
>  
>  #ifdef KVM_CAP_SET_GUEST_DEBUG
> -int kvm_set_guest_debug(kvm_context_t, int vcpu, struct kvm_guest_debug *dbg);
> +int kvm_set_guest_debug(KVMState *, int vcpu, struct kvm_guest_debug *dbg);
>  #endif
>  
>  #if defined(__i386__) || defined(__x86_64__)
> @@ -369,7 +369,7 @@ int kvm_set_guest_debug(kvm_context_t, int vcpu, struct kvm_guest_debug *dbg);
>   * \param entries cpuid function entries table
>   * \return 0 on success, or -errno on error
>   */
> -int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
> +int kvm_setup_cpuid(KVMState *kvm, int vcpu, int nent,
>  		    struct kvm_cpuid_entry *entries);
>  
>  /*!
> @@ -385,7 +385,7 @@ int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
>   * \param entries cpuid function entries table
>   * \return 0 on success, or -errno on error
>   */
> -int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
> +int kvm_setup_cpuid2(KVMState *kvm, int vcpu, int nent,
>  		     struct kvm_cpuid_entry2 *entries);
>  
>  /*!
> @@ -394,7 +394,7 @@ int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
>   * \param kvm pointer to kvm_context
>   * \param nrshadow_pages number of pages to be allocated
>   */
> -int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages);
> +int kvm_set_shadow_pages(KVMState *kvm, unsigned int nrshadow_pages);
>  
>  /*!
>   * \brief Getting the number of shadow pages that are allocated to the vm
> @@ -402,7 +402,7 @@ int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages);
>   * \param kvm pointer to kvm_context
>   * \param nrshadow_pages number of pages to be allocated
>   */
> -int kvm_get_shadow_pages(kvm_context_t kvm , unsigned int *nrshadow_pages);
> +int kvm_get_shadow_pages(KVMState *kvm , unsigned int *nrshadow_pages);
>  
>  /*!
>   * \brief Set up cr8 for next time the vcpu is executed
> @@ -414,7 +414,7 @@ int kvm_get_shadow_pages(kvm_context_t kvm , unsigned int *nrshadow_pages);
>   * \param vcpu Which virtual CPU should get dumped
>   * \param cr8 next cr8 value
>   */
> -void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8);
> +void kvm_set_cr8(KVMState *kvm, int vcpu, uint64_t cr8);
>  
>  /*!
>   * \brief Get cr8 for sync tpr in qemu apic emulation
> @@ -425,7 +425,7 @@ void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8);
>   * \param kvm Pointer to the current kvm_context
>   * \param vcpu Which virtual CPU should get dumped
>   */
> -__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu);
> +__u64 kvm_get_cr8(KVMState *kvm, int vcpu);
>  #endif
>  
>  /*!
> @@ -441,7 +441,7 @@ __u64 kvm_get_cr8(kvm_context_t kvm, int vcpu);
>   * \param sigset signal mask for guest mode
>   * \return 0 on success, or -errno on error
>   */
> -int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset);
> +int kvm_set_signal_mask(KVMState *kvm, int vcpu, const sigset_t *sigset);
>  
>  /*!
>   * \brief Dump all VCPU information
> @@ -457,7 +457,7 @@ int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset);
>   * \param vcpu Which virtual CPU should get dumped
>   * \return 0 on success
>   */
> -int kvm_dump_vcpu(kvm_context_t kvm, int vcpu);
> +int kvm_dump_vcpu(KVMState *kvm, int vcpu);
>  
>  /*!
>   * \brief Dump VCPU registers
> @@ -471,28 +471,28 @@ int kvm_dump_vcpu(kvm_context_t kvm, int vcpu);
>   * \param vcpu Which virtual CPU should get dumped
>   * \return 0 on success
>   */
> -void kvm_show_regs(kvm_context_t kvm, int vcpu);
> +void kvm_show_regs(KVMState *kvm, int vcpu);
>  
>  
> -void *kvm_create_phys_mem(kvm_context_t, unsigned long phys_start, 
> +void *kvm_create_phys_mem(KVMState *, unsigned long phys_start, 
>  			  unsigned long len, int log, int writable);
> -void kvm_destroy_phys_mem(kvm_context_t, unsigned long phys_start, 
> +void kvm_destroy_phys_mem(KVMState *, unsigned long phys_start, 
>  			  unsigned long len);
> -void kvm_unregister_memory_area(kvm_context_t, uint64_t phys_start,
> +void kvm_unregister_memory_area(KVMState *, uint64_t phys_start,
>                                  unsigned long len);
>  
> -int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_start, unsigned long size);
> -int kvm_register_phys_mem(kvm_context_t kvm,
> +int kvm_is_containing_region(KVMState *kvm, unsigned long phys_start, unsigned long size);
> +int kvm_register_phys_mem(KVMState *kvm,
>  			unsigned long phys_start, void *userspace_addr,
>  			unsigned long len, int log);
> -int kvm_get_dirty_pages(kvm_context_t, unsigned long phys_addr, void *buf);
> -int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
> +int kvm_get_dirty_pages(KVMState *, unsigned long phys_addr, void *buf);
> +int kvm_get_dirty_pages_range(KVMState *kvm, unsigned long phys_addr,
>  			      unsigned long end_addr, void *buf, void*opaque,
>  			      int (*cb)(unsigned long start, unsigned long len,
>  					void*bitmap, void *opaque));
> -int kvm_register_coalesced_mmio(kvm_context_t kvm,
> +int kvm_register_coalesced_mmio(KVMState *kvm,
>  				uint64_t addr, uint32_t size);
> -int kvm_unregister_coalesced_mmio(kvm_context_t kvm,
> +int kvm_unregister_coalesced_mmio(KVMState *kvm,
>  				  uint64_t addr, uint32_t size);
>  
>  /*!
> @@ -502,7 +502,7 @@ int kvm_unregister_coalesced_mmio(kvm_context_t kvm,
>   * accesses the alias region, it will behave exactly as if it accessed
>   * the target memory.
>   */
> -int kvm_create_memory_alias(kvm_context_t,
> +int kvm_create_memory_alias(KVMState *,
>  			    uint64_t phys_start, uint64_t len,
>  			    uint64_t target_phys);
>  
> @@ -511,7 +511,7 @@ int kvm_create_memory_alias(kvm_context_t,
>   *
>   * Removes an alias created with kvm_create_memory_alias().
>   */
> -int kvm_destroy_memory_alias(kvm_context_t, uint64_t phys_start);
> +int kvm_destroy_memory_alias(KVMState *, uint64_t phys_start);
>  
>  /*!
>   * \brief Get a bitmap of guest ram pages which are allocated to the guest.
> @@ -520,17 +520,17 @@ int kvm_destroy_memory_alias(kvm_context_t, uint64_t phys_start);
>   * \param phys_addr Memory slot phys addr
>   * \param bitmap Long aligned address of a big enough bitmap (one bit per page)
>   */
> -int kvm_get_mem_map(kvm_context_t kvm, unsigned long phys_addr, void *bitmap);
> -int kvm_get_mem_map_range(kvm_context_t kvm, unsigned long phys_addr,
> +int kvm_get_mem_map(KVMState *kvm, unsigned long phys_addr, void *bitmap);
> +int kvm_get_mem_map_range(KVMState *kvm, unsigned long phys_addr,
>  			   unsigned long len, void *buf, void *opaque,
>  			   int (*cb)(unsigned long start,unsigned long len,
>  				     void* bitmap, void* opaque));
> -int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status);
> +int kvm_set_irq_level(KVMState *kvm, int irq, int level, int *status);
>  
> -int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm,
> +int kvm_dirty_pages_log_enable_slot(KVMState *kvm,
>  				    uint64_t phys_start,
>  				    uint64_t len);
> -int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
> +int kvm_dirty_pages_log_disable_slot(KVMState *kvm,
>  				     uint64_t phys_start,
>  				     uint64_t len);
>  /*!
> @@ -538,7 +538,7 @@ int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_dirty_pages_log_enable_all(kvm_context_t kvm);
> +int kvm_dirty_pages_log_enable_all(KVMState *kvm);
>  
>  /*!
>   * \brief Disable dirty-page-logging for some memory regions
> @@ -548,16 +548,16 @@ int kvm_dirty_pages_log_enable_all(kvm_context_t kvm);
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_dirty_pages_log_reset(kvm_context_t kvm);
> +int kvm_dirty_pages_log_reset(KVMState *kvm);
>  
>  /*!
>   * \brief Query whether in kernel irqchip is used
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_irqchip_in_kernel(kvm_context_t kvm);
> +int kvm_irqchip_in_kernel(KVMState *kvm);
>  
> -int kvm_has_sync_mmu(kvm_context_t kvm);
> +int kvm_has_sync_mmu(KVMState *kvm);
>  
>  #ifdef KVM_CAP_IRQCHIP
>  /*!
> @@ -569,7 +569,7 @@ int kvm_has_sync_mmu(kvm_context_t kvm);
>   * \param kvm Pointer to the current kvm_context
>   * \param chip The irq chip device to be dumped
>   */
> -int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
> +int kvm_get_irqchip(KVMState *kvm, struct kvm_irqchip *chip);
>  
>  /*!
>   * \brief Set in kernel IRQCHIP contents
> @@ -581,7 +581,7 @@ int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
>   * \param kvm Pointer to the current kvm_context
>   * \param chip THe irq chip device to be written
>   */
> -int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
> +int kvm_set_irqchip(KVMState *kvm, struct kvm_irqchip *chip);
>  
>  #if defined(__i386__) || defined(__x86_64__)
>  /*!
> @@ -593,7 +593,7 @@ int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
>   * \param vcpu Which virtual CPU should be accessed
>   * \param s Local apic state of the specific virtual CPU
>   */
> -int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
> +int kvm_get_lapic(KVMState *kvm, int vcpu, struct kvm_lapic_state *s);
>  
>  /*!
>   * \brief Set in kernel local APIC for vcpu
> @@ -604,7 +604,7 @@ int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
>   * \param vcpu Which virtual CPU should be accessed
>   * \param s Local apic state of the specific virtual CPU
>   */
> -int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
> +int kvm_set_lapic(KVMState *kvm, int vcpu, struct kvm_lapic_state *s);
>  
>  #endif
>  
> @@ -617,7 +617,7 @@ int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
>   * \param vcpu Which virtual CPU should get dumped
>   * \return 0 on success
>   */
> -int kvm_inject_nmi(kvm_context_t kvm, int vcpu);
> +int kvm_inject_nmi(KVMState *kvm, int vcpu);
>  
>  #endif
>  
> @@ -626,7 +626,7 @@ int kvm_inject_nmi(kvm_context_t kvm, int vcpu);
>   *
>   *  \param kvm Pointer to the current kvm_context
>   */
> -int kvm_pit_in_kernel(kvm_context_t kvm);
> +int kvm_pit_in_kernel(KVMState *kvm);
>  
>  /*!
>   * \brief Initialize coalesced MMIO
> @@ -635,7 +635,7 @@ int kvm_pit_in_kernel(kvm_context_t kvm);
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_init_coalesced_mmio(kvm_context_t kvm);
> +int kvm_init_coalesced_mmio(KVMState *kvm);
>  
>  #ifdef KVM_CAP_PIT
>  
> @@ -648,7 +648,7 @@ int kvm_init_coalesced_mmio(kvm_context_t kvm);
>   * \param kvm Pointer to the current kvm_context
>   * \param s PIT state of the virtual domain
>   */
> -int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s);
> +int kvm_get_pit(KVMState *kvm, struct kvm_pit_state *s);
>  
>  /*!
>   * \brief Set in kernel PIT of the virtual domain
> @@ -659,10 +659,10 @@ int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s);
>   * \param kvm Pointer to the current kvm_context
>   * \param s PIT state of the virtual domain
>   */
> -int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s);
> +int kvm_set_pit(KVMState *kvm, struct kvm_pit_state *s);
>  #endif
>  
> -int kvm_reinject_control(kvm_context_t kvm, int pit_reinject);
> +int kvm_reinject_control(KVMState *kvm, int pit_reinject);
>  
>  #endif
>  
> @@ -677,7 +677,7 @@ int kvm_reinject_control(kvm_context_t kvm, int pit_reinject);
>   * \param kvm Pointer to the current kvm_context
>   * \param vcpu vcpu to enable tpr access reporting on
>   */
> -int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
> +int kvm_enable_tpr_access_reporting(KVMState *kvm, int vcpu);
>  
>  /*!
>   * \brief Disable kernel tpr access reporting
> @@ -687,18 +687,18 @@ int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
>   * \param kvm Pointer to the current kvm_context
>   * \param vcpu vcpu to disable tpr access reporting on
>   */
> -int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
> +int kvm_disable_tpr_access_reporting(KVMState *kvm, int vcpu);
>  
> -int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic);
> +int kvm_enable_vapic(KVMState *kvm, int vcpu, uint64_t vapic);
>  
>  #endif
>  
>  #if defined(__s390__)
> -int kvm_s390_initial_reset(kvm_context_t kvm, int slot);
> -int kvm_s390_interrupt(kvm_context_t kvm, int slot,
> +int kvm_s390_initial_reset(KVMState *kvm, int slot);
> +int kvm_s390_interrupt(KVMState *kvm, int slot,
>  	struct kvm_s390_interrupt *kvmint);
> -int kvm_s390_set_initial_psw(kvm_context_t kvm, int slot, psw_t psw);
> -int kvm_s390_store_status(kvm_context_t kvm, int slot, unsigned long addr);
> +int kvm_s390_set_initial_psw(KVMState *kvm, int slot, psw_t psw);
> +int kvm_s390_store_status(KVMState *kvm, int slot, unsigned long addr);
>  #endif
>  
>  #ifdef KVM_CAP_DEVICE_ASSIGNMENT
> @@ -711,7 +711,7 @@ int kvm_s390_store_status(kvm_context_t kvm, int slot, unsigned long addr);
>   * \param kvm Pointer to the current kvm_context
>   * \param assigned_dev Parameters, like bus, devfn number, etc
>   */
> -int kvm_assign_pci_device(kvm_context_t kvm,
> +int kvm_assign_pci_device(KVMState *kvm,
>  			  struct kvm_assigned_pci_dev *assigned_dev);
>  
>  /*!
> @@ -723,7 +723,7 @@ int kvm_assign_pci_device(kvm_context_t kvm,
>   * \param kvm Pointer to the current kvm_context
>   * \param assigned_irq Parameters, like dev id, host irq, guest irq, etc
>   */
> -int kvm_assign_irq(kvm_context_t kvm,
> +int kvm_assign_irq(KVMState *kvm,
>  		   struct kvm_assigned_irq *assigned_irq);
>  
>  #ifdef KVM_CAP_ASSIGN_DEV_IRQ
> @@ -736,7 +736,7 @@ int kvm_assign_irq(kvm_context_t kvm,
>   * \param kvm Pointer to the current kvm_context
>   * \param assigned_irq Parameters, like dev id, host irq, guest irq, etc
>   */
> -int kvm_deassign_irq(kvm_context_t kvm,
> +int kvm_deassign_irq(KVMState *kvm,
>                     struct kvm_assigned_irq *assigned_irq);
>  #endif
>  #endif
> @@ -748,7 +748,7 @@ int kvm_deassign_irq(kvm_context_t kvm,
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_destroy_memory_region_works(kvm_context_t kvm);
> +int kvm_destroy_memory_region_works(KVMState *kvm);
>  
>  #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
>  /*!
> @@ -760,7 +760,7 @@ int kvm_destroy_memory_region_works(kvm_context_t kvm);
>   * \param kvm Pointer to the current kvm_context
>   * \param assigned_dev Parameters, like bus, devfn number, etc
>   */
> -int kvm_deassign_pci_device(kvm_context_t kvm,
> +int kvm_deassign_pci_device(KVMState *kvm,
>  			    struct kvm_assigned_pci_dev *assigned_dev);
>  #endif
>  
> @@ -772,7 +772,7 @@ int kvm_deassign_pci_device(kvm_context_t kvm,
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_has_gsi_routing(kvm_context_t kvm);
> +int kvm_has_gsi_routing(KVMState *kvm);
>  
>  /*!
>   * \brief Determines the number of gsis that can be routed
> @@ -783,7 +783,7 @@ int kvm_has_gsi_routing(kvm_context_t kvm);
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_get_gsi_count(kvm_context_t kvm);
> +int kvm_get_gsi_count(KVMState *kvm);
>  
>  /*!
>   * \brief Clears the temporary irq routing table
> @@ -793,7 +793,7 @@ int kvm_get_gsi_count(kvm_context_t kvm);
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_clear_gsi_routes(kvm_context_t kvm);
> +int kvm_clear_gsi_routes(KVMState *kvm);
>  
>  /*!
>   * \brief Adds an irq route to the temporary irq routing table
> @@ -803,7 +803,7 @@ int kvm_clear_gsi_routes(kvm_context_t kvm);
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
> +int kvm_add_irq_route(KVMState *kvm, int gsi, int irqchip, int pin);
>  
>  /*!
>   * \brief Removes an irq route from the temporary irq routing table
> @@ -813,7 +813,7 @@ int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
> +int kvm_del_irq_route(KVMState *kvm, int gsi, int irqchip, int pin);
>  
>  struct kvm_irq_routing_entry;
>  /*!
> @@ -824,7 +824,7 @@ struct kvm_irq_routing_entry;
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_add_routing_entry(kvm_context_t kvm,
> +int kvm_add_routing_entry(KVMState *kvm,
>                            struct kvm_irq_routing_entry* entry);
>  
>  /*!
> @@ -835,7 +835,7 @@ int kvm_add_routing_entry(kvm_context_t kvm,
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_del_routing_entry(kvm_context_t kvm,
> +int kvm_del_routing_entry(KVMState *kvm,
>  		          struct kvm_irq_routing_entry* entry);
>  
>  /*!
> @@ -845,7 +845,7 @@ int kvm_del_routing_entry(kvm_context_t kvm,
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_commit_irq_routes(kvm_context_t kvm);
> +int kvm_commit_irq_routes(KVMState *kvm);
>  
>  /*!
>   * \brief Get unused GSI number for irq routing table
> @@ -854,15 +854,15 @@ int kvm_commit_irq_routes(kvm_context_t kvm);
>   *
>   * \param kvm Pointer to the current kvm_context
>   */
> -int kvm_get_irq_route_gsi(kvm_context_t kvm);
> +int kvm_get_irq_route_gsi(KVMState *kvm);
>  
>  #ifdef KVM_CAP_DEVICE_MSIX
> -int kvm_assign_set_msix_nr(kvm_context_t kvm,
> +int kvm_assign_set_msix_nr(KVMState *kvm,
>  			   struct kvm_assigned_msix_nr *msix_nr);
> -int kvm_assign_set_msix_entry(kvm_context_t kvm,
> +int kvm_assign_set_msix_entry(KVMState *kvm,
>                                struct kvm_assigned_msix_entry *entry);
>  #endif
>  
> -uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg);
> +uint32_t kvm_get_supported_cpuid(KVMState *kvm, uint32_t function, int reg);
>  
>  #endif
> diff --git a/libkvm-common.h b/libkvm-common.h
> index c95c591..ad981b3 100644
> --- a/libkvm-common.h
> +++ b/libkvm-common.h
> @@ -71,24 +71,24 @@ struct kvm_context {
>  	int max_gsi;
>  };
>  
> -int kvm_alloc_kernel_memory(kvm_context_t kvm, unsigned long memory,
> +int kvm_alloc_kernel_memory(KVMState *kvm, unsigned long memory,
>  								void **vm_mem);
> -int kvm_alloc_userspace_memory(kvm_context_t kvm, unsigned long memory,
> +int kvm_alloc_userspace_memory(KVMState *kvm, unsigned long memory,
>  								void **vm_mem);
>  
> -int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
> +int kvm_arch_create(KVMState *kvm, unsigned long phys_mem_bytes,
>                          void **vm_mem);
> -int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu);
> +int kvm_arch_run(struct kvm_run *run, KVMState *kvm, int vcpu);
>  
>  
> -void kvm_show_code(kvm_context_t kvm, int vcpu);
> +void kvm_show_code(KVMState *kvm, int vcpu);
>  
> -int handle_halt(kvm_context_t kvm, int vcpu);
> -int handle_shutdown(kvm_context_t kvm, void *env);
> -void post_kvm_run(kvm_context_t kvm, void *env);
> -int pre_kvm_run(kvm_context_t kvm, void *env);
> -int handle_io_window(kvm_context_t kvm);
> -int handle_debug(kvm_context_t kvm, int vcpu, void *env);
> -int try_push_interrupts(kvm_context_t kvm);
> +int handle_halt(KVMState *kvm, int vcpu);
> +int handle_shutdown(KVMState *kvm, void *env);
> +void post_kvm_run(KVMState *kvm, void *env);
> +int pre_kvm_run(KVMState *kvm, void *env);
> +int handle_io_window(KVMState *kvm);
> +int handle_debug(KVMState *kvm, int vcpu, void *env);
> +int try_push_interrupts(KVMState *kvm);
>  
>  #endif
> diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
> index 1096e65..9e8810a 100644
> --- a/qemu-kvm-x86.c
> +++ b/qemu-kvm-x86.c
> @@ -498,7 +498,7 @@ struct kvm_para_features {
>  	{ -1, -1 }
>  };
>  
> -static int get_para_features(kvm_context_t kvm_context)
> +static int get_para_features(KVMState *kvm_context)
>  {
>  	int i, features = 0;
>  
> diff --git a/qemu-kvm.c b/qemu-kvm.c
> index 68d3b92..a0846e8 100644
> --- a/qemu-kvm.c
> +++ b/qemu-kvm.c
> @@ -33,7 +33,7 @@ int kvm_irqchip = 1;
>  int kvm_pit = 1;
>  int kvm_pit_reinject = 1;
>  int kvm_nested = 0;
> -kvm_context_t kvm_context;
> +KVMState *kvm_context;
>  
>  pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
>  pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
> diff --git a/qemu-kvm.h b/qemu-kvm.h
> index 725589b..a470f3c 100644
> --- a/qemu-kvm.h
> +++ b/qemu-kvm.h
> @@ -154,7 +154,7 @@ extern int kvm_irqchip;
>  extern int kvm_pit;
>  extern int kvm_pit_reinject;
>  extern int kvm_nested;
> -extern kvm_context_t kvm_context;
> +extern KVMState *kvm_context;
>  
>  struct ioperm_data {
>      unsigned long start_port;
> diff --git a/target-i386/libkvm.c b/target-i386/libkvm.c
> index 32d03f1..b64e632 100644
> --- a/target-i386/libkvm.c
> +++ b/target-i386/libkvm.c
> @@ -12,7 +12,7 @@
>  #include <fcntl.h>
>  #include <stdlib.h>
>  
> -int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
> +int kvm_set_tss_addr(KVMState *kvm, unsigned long addr)
>  {
>  #ifdef KVM_CAP_SET_TSS_ADDR
>  	int r;
> @@ -30,7 +30,7 @@ int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
>  	return -ENOSYS;
>  }
>  
> -static int kvm_init_tss(kvm_context_t kvm)
> +static int kvm_init_tss(KVMState *kvm)
>  {
>  #ifdef KVM_CAP_SET_TSS_ADDR
>  	int r;
> @@ -52,7 +52,7 @@ static int kvm_init_tss(kvm_context_t kvm)
>  	return 0;
>  }
>  
> -static int kvm_create_pit(kvm_context_t kvm)
> +static int kvm_create_pit(KVMState *kvm)
>  {
>  #ifdef KVM_CAP_PIT
>  	int r;
> @@ -74,7 +74,7 @@ static int kvm_create_pit(kvm_context_t kvm)
>  	return 0;
>  }
>  
> -int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
> +int kvm_arch_create(KVMState *kvm, unsigned long phys_mem_bytes,
>   			void **vm_mem)
>  {
>  	int r = 0;
> @@ -96,7 +96,7 @@ int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
>  
>  #ifdef KVM_EXIT_TPR_ACCESS
>  
> -static int handle_tpr_access(kvm_context_t kvm, struct kvm_run *run, int vcpu)
> +static int handle_tpr_access(KVMState *kvm, struct kvm_run *run, int vcpu)
>  {
>  	return kvm->callbacks->tpr_access(kvm->opaque, vcpu,
>  					  run->tpr_access.rip,
> @@ -104,7 +104,7 @@ static int handle_tpr_access(kvm_context_t kvm, struct kvm_run *run, int vcpu)
>  }
>  
>  
> -int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic)
> +int kvm_enable_vapic(KVMState *kvm, int vcpu, uint64_t vapic)
>  {
>  	int r;
>  	struct kvm_vapic_addr va = {
> @@ -122,7 +122,7 @@ int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic)
>  
>  #endif
>  
> -int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
> +int kvm_arch_run(struct kvm_run *run,KVMState *kvm, int vcpu)
>  {
>  	int r = 0;
>  
> @@ -175,7 +175,7 @@ static void register_alias(int slot, uint64_t start, uint64_t len)
>  	kvm_aliases[slot].len   = len;
>  }
>  
> -int kvm_create_memory_alias(kvm_context_t kvm,
> +int kvm_create_memory_alias(KVMState *kvm,
>  			    uint64_t phys_start,
>  			    uint64_t len,
>  			    uint64_t target_phys)
> @@ -205,14 +205,14 @@ int kvm_create_memory_alias(kvm_context_t kvm,
>  	return 0;
>  }
>  
> -int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
> +int kvm_destroy_memory_alias(KVMState *kvm, uint64_t phys_start)
>  {
>  	return kvm_create_memory_alias(kvm, phys_start, 0, 0);
>  }
>  
>  #ifdef KVM_CAP_IRQCHIP
>  
> -int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
> +int kvm_get_lapic(KVMState *kvm, int vcpu, struct kvm_lapic_state *s)
>  {
>  	int r;
>  	if (!kvm->irqchip_in_kernel)
> @@ -225,7 +225,7 @@ int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
>  	return r;
>  }
>  
> -int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
> +int kvm_set_lapic(KVMState *kvm, int vcpu, struct kvm_lapic_state *s)
>  {
>  	int r;
>  	if (!kvm->irqchip_in_kernel)
> @@ -242,7 +242,7 @@ int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
>  
>  #ifdef KVM_CAP_PIT
>  
> -int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s)
> +int kvm_get_pit(KVMState *kvm, struct kvm_pit_state *s)
>  {
>  	int r;
>  	if (!kvm->pit_in_kernel)
> @@ -255,7 +255,7 @@ int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s)
>  	return r;
>  }
>  
> -int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s)
> +int kvm_set_pit(KVMState *kvm, struct kvm_pit_state *s)
>  {
>  	int r;
>  	if (!kvm->pit_in_kernel)
> @@ -270,7 +270,7 @@ int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s)
>  
>  #endif
>  
> -void kvm_show_code(kvm_context_t kvm, int vcpu)
> +void kvm_show_code(KVMState *kvm, int vcpu)
>  {
>  #define SHOW_CODE_LEN 50
>  	int fd = kvm->vcpu_fd[vcpu];
> @@ -314,7 +314,7 @@ void kvm_show_code(kvm_context_t kvm, int vcpu)
>  /*
>   * Returns available msr list.  User must free.
>   */
> -struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
> +struct kvm_msr_list *kvm_get_msr_list(KVMState *kvm)
>  {
>  	struct kvm_msr_list sizer, *msrs;
>  	int r, e;
> @@ -339,7 +339,7 @@ struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
>  	return msrs;
>  }
>  
> -int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
> +int kvm_get_msrs(KVMState *kvm, int vcpu, struct kvm_msr_entry *msrs,
>  		 int n)
>  {
>      struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
> @@ -359,7 +359,7 @@ int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
>      return r;
>  }
>  
> -int kvm_set_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
> +int kvm_set_msrs(KVMState *kvm, int vcpu, struct kvm_msr_entry *msrs,
>  		 int n)
>  {
>      struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
> @@ -393,7 +393,7 @@ static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
>      	fprintf(stderr, "%s %llx/%x\n", name, dt->base, dt->limit);
>  }
>  
> -void kvm_show_regs(kvm_context_t kvm, int vcpu)
> +void kvm_show_regs(KVMState *kvm, int vcpu)
>  {
>  	int fd = kvm->vcpu_fd[vcpu];
>  	struct kvm_regs regs;
> @@ -437,26 +437,26 @@ void kvm_show_regs(kvm_context_t kvm, int vcpu)
>  		sregs.efer);
>  }
>  
> -uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu)
> +uint64_t kvm_get_apic_base(KVMState *kvm, int vcpu)
>  {
>  	struct kvm_run *run = kvm->run[vcpu];
>  
>  	return run->apic_base;
>  }
>  
> -void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
> +void kvm_set_cr8(KVMState *kvm, int vcpu, uint64_t cr8)
>  {
>  	struct kvm_run *run = kvm->run[vcpu];
>  
>  	run->cr8 = cr8;
>  }
>  
> -__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu)
> +__u64 kvm_get_cr8(KVMState *kvm, int vcpu)
>  {
>  	return kvm->run[vcpu]->cr8;
>  }
>  
> -int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
> +int kvm_setup_cpuid(KVMState *kvm, int vcpu, int nent,
>  		    struct kvm_cpuid_entry *entries)
>  {
>  	struct kvm_cpuid *cpuid;
> @@ -474,7 +474,7 @@ int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
>  	return r;
>  }
>  
> -int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
> +int kvm_setup_cpuid2(KVMState *kvm, int vcpu, int nent,
>  		     struct kvm_cpuid_entry2 *entries)
>  {
>  	struct kvm_cpuid2 *cpuid;
> @@ -495,7 +495,7 @@ int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
>  	return r;
>  }
>  
> -int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
> +int kvm_set_shadow_pages(KVMState *kvm, unsigned int nrshadow_pages)
>  {
>  #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
>  	int r;
> @@ -514,7 +514,7 @@ int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
>  	return -1;
>  }
>  
> -int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
> +int kvm_get_shadow_pages(KVMState *kvm, unsigned int *nrshadow_pages)
>  {
>  #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
>  	int r;
> @@ -531,7 +531,7 @@ int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
>  
>  #ifdef KVM_CAP_VAPIC
>  
> -static int tpr_access_reporting(kvm_context_t kvm, int vcpu, int enabled)
> +static int tpr_access_reporting(KVMState *kvm, int vcpu, int enabled)
>  {
>  	int r;
>  	struct kvm_tpr_access_ctl tac = {
> @@ -550,12 +550,12 @@ static int tpr_access_reporting(kvm_context_t kvm, int vcpu, int enabled)
>  	return 0;
>  }
>  
> -int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
> +int kvm_enable_tpr_access_reporting(KVMState *kvm, int vcpu)
>  {
>  	return tpr_access_reporting(kvm, vcpu, 1);
>  }
>  
> -int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
> +int kvm_disable_tpr_access_reporting(KVMState *kvm, int vcpu)
>  {
>  	return tpr_access_reporting(kvm, vcpu, 0);
>  }
> @@ -564,7 +564,7 @@ int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
>  
>  #ifdef KVM_CAP_EXT_CPUID
>  
> -static struct kvm_cpuid2 *try_get_cpuid(kvm_context_t kvm, int max)
> +static struct kvm_cpuid2 *try_get_cpuid(KVMState *kvm, int max)
>  {
>  	struct kvm_cpuid2 *cpuid;
>  	int r, size;
> @@ -599,7 +599,7 @@ static struct kvm_cpuid2 *try_get_cpuid(kvm_context_t kvm, int max)
>  #define R_ESI 6
>  #define R_EDI 7
>  
> -uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
> +uint32_t kvm_get_supported_cpuid(KVMState *kvm, uint32_t function, int reg)
>  {
>  	struct kvm_cpuid2 *cpuid;
>  	int i, max;
> @@ -658,7 +658,7 @@ uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
>  
>  #else
>  
> -uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
> +uint32_t kvm_get_supported_cpuid(KVMState *kvm, uint32_t function, int reg)
>  {
>  	return -1U;
>  }
> diff --git a/target-i386/libkvm.h b/target-i386/libkvm.h
> index 081e010..26a4597 100644
> --- a/target-i386/libkvm.h
> +++ b/target-i386/libkvm.h
> @@ -23,7 +23,7 @@
>  #define PAGE_SIZE 4096ul
>  #define PAGE_MASK (~(PAGE_SIZE - 1))
>  
> -int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr);
> +int kvm_set_tss_addr(KVMState *kvm, unsigned long addr);
>  
>  #define smp_wmb()   asm volatile("" ::: "memory")
>  
> diff --git a/target-ia64/libkvm.c b/target-ia64/libkvm.c
> index 48669de..bcd9750 100644
> --- a/target-ia64/libkvm.c
> +++ b/target-ia64/libkvm.c
> @@ -33,7 +33,7 @@
>  #include <fcntl.h>
>  #include <stdlib.h>
>  
> -int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
> +int kvm_arch_create(KVMState *kvm, unsigned long phys_mem_bytes,
>  			void **vm_mem)
>  {
>  	int r;
> @@ -45,7 +45,7 @@ int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
>  	return 0;
>  }
>  
> -int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
> +int kvm_arch_run(struct kvm_run *run,KVMState *kvm, int vcpu)
>  {
>  	int r = 0;
>  
> @@ -58,17 +58,17 @@ int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
>  	return r;
>  }
>  
> -void kvm_show_code(kvm_context_t kvm, int vcpu)
> +void kvm_show_code(KVMState *kvm, int vcpu)
>  {
>  	fprintf(stderr, "kvm_show_code not supported yet!\n");
>  }
>  
> -void kvm_show_regs(kvm_context_t kvm, int vcpu)
> +void kvm_show_regs(KVMState *kvm, int vcpu)
>  {
>  	fprintf(stderr,"kvm_show_regs not supportted today!\n");
>  }
>  
> -int kvm_create_memory_alias(kvm_context_t kvm,
> +int kvm_create_memory_alias(KVMState *kvm,
>  			    uint64_t phys_start,
>  			    uint64_t len,
>  			    uint64_t target_phys)
> @@ -76,7 +76,7 @@ int kvm_create_memory_alias(kvm_context_t kvm,
>      return 0;
>  }
>  
> -int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
> +int kvm_destroy_memory_alias(KVMState *kvm, uint64_t phys_start)
>  {
>  	return 0;
>  }
> diff --git a/target-ppc/libkvm.c b/target-ppc/libkvm.c
> index 2dfff3b..b5c59e8 100644
> --- a/target-ppc/libkvm.c
> +++ b/target-ppc/libkvm.c
> @@ -23,7 +23,7 @@
>  #include <stdio.h>
>  #include <inttypes.h>
>  
> -int handle_dcr(struct kvm_run *run,  kvm_context_t kvm, int vcpu)
> +int handle_dcr(struct kvm_run *run,  KVMState *kvm, int vcpu)
>  {
>  	int ret = 0;
>  
> @@ -39,12 +39,12 @@ int handle_dcr(struct kvm_run *run,  kvm_context_t kvm, int vcpu)
>  	return ret;
>  }
>  
> -void kvm_show_code(kvm_context_t kvm, int vcpu)
> +void kvm_show_code(KVMState *kvm, int vcpu)
>  {
>  	fprintf(stderr, "%s: Operation not supported\n", __FUNCTION__);
>  }
>  
> -void kvm_show_regs(kvm_context_t kvm, int vcpu)
> +void kvm_show_regs(KVMState *kvm, int vcpu)
>  {
>  	struct kvm_regs regs;
>  	int i;
> @@ -72,7 +72,7 @@ void kvm_show_regs(kvm_context_t kvm, int vcpu)
>  	fflush(stdout);
>  }
>  
> -int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
> +int kvm_arch_create(KVMState *kvm, unsigned long phys_mem_bytes,
>  			 void **vm_mem)
>  {
>  	int r;
> @@ -84,7 +84,7 @@ int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
>  	return 0;
>  }
>  
> -int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu)
> +int kvm_arch_run(struct kvm_run *run, KVMState *kvm, int vcpu)
>  {
>  	int ret = 0;
>  
> -- 
> 1.5.6.6
> 

--
			Gleb.
Glauber Costa June 4, 2009, 7:33 p.m. UTC | #2
On Thu, Jun 04, 2009 at 10:23:29PM +0300, Gleb Natapov wrote:
> On Thu, Jun 04, 2009 at 02:23:03PM -0400, Glauber Costa wrote:
> > This is a pretty mechanical change. To make code look
> > closer to upstream qemu, I'm renaming kvm_context_t to
> > KVMState. Mid term goal here is to start sharing code
> > whereas possible.
> > 
> > Avi, please apply, or I'll send you a video of myself
> > dancing naked.
> > 
> You can start recording it since I doubt this patch will apply cleanly
> to today's master (other mechanical change was applied). Regardless, I
> think trying to use bits of qemu kvm is dangerous. It has similar function
> with same names, but with different assumptions about conditional they
> can be executed in (look at commit a5ddb119). I actually prefer to be
> different enough to not call upstream qemu function by mistake.

I did it against today's master. If new patches have come in, it's just
a matter of regenerating this, since it is, as I said, mechanical.

Also, as we don't compile in upstream functions yet (kvm-all.c and kvm.c
are not included in the final object), there is no such risk.
Of course, I am aiming towards it, but the first step will be to change
the name of conflicting functions until we can pick qemu's implementation,
in which case the former will just go away.
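
(Purely hypothetical illustration of that renaming step -- neither symbol
below is claimed to exist in either tree; the point is only that both
variants can coexist until callers are converted:)

/* upstream qemu version that will eventually be used everywhere */
int kvm_frobnicate_irq(KVMState *s, int irq);

/* qemu-kvm's conflicting variant, renamed so both can be compiled in;
 * it is deleted once the upstream implementation is picked up */
int qemu_kvm_frobnicate_irq(KVMState *s, int irq);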

If we are serious about merging qemu-kvm into qemu, I don't see a way out
of it. We should start changing things this way to accommodate it. Different
enough won't do.

Gleb Natapov June 4, 2009, 8 p.m. UTC | #3
On Thu, Jun 04, 2009 at 04:33:19PM -0300, Glauber Costa wrote:
> On Thu, Jun 04, 2009 at 10:23:29PM +0300, Gleb Natapov wrote:
> > On Thu, Jun 04, 2009 at 02:23:03PM -0400, Glauber Costa wrote:
> > > This is a pretty mechanical change. To make code look
> > > closer to upstream qemu, I'm renaming kvm_context_t to
> > > KVMState. Mid term goal here is to start sharing code
> > > whereas possible.
> > > 
> > > Avi, please apply, or I'll send you a video of myself
> > > dancing naked.
> > > 
> > You can start recording it since I doubt this patch will apply cleanly
> > to today's master (other mechanical change was applied). Regardless, I
> > think trying to use bits of qemu kvm is dangerous. It has similar function
> > with same names, but with different assumptions about conditional they
> > can be executed in (look at commit a5ddb119). I actually prefer to be
> > different enough to not call upstream qemu function by mistake.
> 
> I did it against today's master. If new patches came in, is just
> a matter of regenerating this, since it is, as I said, mechanical.
> 
> Also, as we don't compile in upstream functions yet (kvm-all.c and kvm.c
> are not included in the final object), there is no such risk.
> Of course, I am aiming towards it, but the first step will be to change
> the name of conflicting functions until we can pick qemu's implementation,
> in which case the former will just go away.
That is the point. We can't just pick qemu's implementation most of the
time.

> 
> If we are serious about merging qemu-kvm into qemu, I don't see a way out
> of it. We should start changing things this way to accomodate it. Different
> enough won't do.
I don't really like the idea of morphing a working implementation to look like
a non-working one. I do agree that qemu-kvm should be cleaned up substantially
before going upstream. Upstream qemu kvm should go away then. I don't
see much work being done to enhance it anyway.

--
			Gleb.
Gleb Natapov June 4, 2009, 8:09 p.m. UTC | #4
On Thu, Jun 04, 2009 at 05:10:51PM -0300, Glauber Costa wrote:
> On Thu, Jun 04, 2009 at 11:00:46PM +0300, Gleb Natapov wrote:
> > On Thu, Jun 04, 2009 at 04:33:19PM -0300, Glauber Costa wrote:
> > > On Thu, Jun 04, 2009 at 10:23:29PM +0300, Gleb Natapov wrote:
> > > > On Thu, Jun 04, 2009 at 02:23:03PM -0400, Glauber Costa wrote:
> > > > > This is a pretty mechanical change. To make code look
> > > > > closer to upstream qemu, I'm renaming kvm_context_t to
> > > > > KVMState. Mid term goal here is to start sharing code
> > > > > whereas possible.
> > > > > 
> > > > > Avi, please apply, or I'll send you a video of myself
> > > > > dancing naked.
> > > > > 
> > > > You can start recording it since I doubt this patch will apply cleanly
> > > > to today's master (other mechanical change was applied). Regardless, I
> > > > think trying to use bits of qemu kvm is dangerous. It has similar function
> > > > with same names, but with different assumptions about conditional they
> > > > can be executed in (look at commit a5ddb119). I actually prefer to be
> > > > different enough to not call upstream qemu function by mistake.
> > > 
> > > I did it against today's master. If new patches came in, is just
> > > a matter of regenerating this, since it is, as I said, mechanical.
> > > 
> > > Also, as we don't compile in upstream functions yet (kvm-all.c and kvm.c
> > > are not included in the final object), there is no such risk.
> > > Of course, I am aiming towards it, but the first step will be to change
> > > the name of conflicting functions until we can pick qemu's implementation,
> > > in which case the former will just go away.
> > That is the point. We can't just pick qemu's implementation most of the
> > times.
> "until we can pick up qemu's implementation" potentially involves replacing
> that particular piece with upstream version first.
> 
> > 
> > > 
> > > If we are serious about merging qemu-kvm into qemu, I don't see a way out
> > > of it. We should start changing things this way to accomodate it. Different
> > > enough won't do.
> > I don't really like the idea to morph working implementation to look like
> > non-working one. I do agree that qemu-kvm should be cleaned substantially
> > before going upstream. Upstream qemu kvm should go away than. I don't
> > see much work done to enhance it anyway.
> > 
> 
> this first phase has nothing to do with functionality. To begin with,
> KVMState is qemu style, kvm_context_t is not, like it or not (I don't).
> 
I am not against this mechanical change at all, don't get me wrong. I
don't want to mix the two kvm implementations together in strange ways.

> I don't plan to introduce regressions, you can rest assured. But we _do_
> have to make things look much more qemuer, and that's what this patch
> aims at.

--
			Gleb.
Glauber Costa June 4, 2009, 8:10 p.m. UTC | #5
On Thu, Jun 04, 2009 at 11:00:46PM +0300, Gleb Natapov wrote:
> On Thu, Jun 04, 2009 at 04:33:19PM -0300, Glauber Costa wrote:
> > On Thu, Jun 04, 2009 at 10:23:29PM +0300, Gleb Natapov wrote:
> > > On Thu, Jun 04, 2009 at 02:23:03PM -0400, Glauber Costa wrote:
> > > > This is a pretty mechanical change. To make code look
> > > > closer to upstream qemu, I'm renaming kvm_context_t to
> > > > KVMState. Mid term goal here is to start sharing code
> > > > whereas possible.
> > > > 
> > > > Avi, please apply, or I'll send you a video of myself
> > > > dancing naked.
> > > > 
> > > You can start recording it since I doubt this patch will apply cleanly
> > > to today's master (other mechanical change was applied). Regardless, I
> > > think trying to use bits of qemu kvm is dangerous. It has similar function
> > > with same names, but with different assumptions about conditional they
> > > can be executed in (look at commit a5ddb119). I actually prefer to be
> > > different enough to not call upstream qemu function by mistake.
> > 
> > I did it against today's master. If new patches came in, is just
> > a matter of regenerating this, since it is, as I said, mechanical.
> > 
> > Also, as we don't compile in upstream functions yet (kvm-all.c and kvm.c
> > are not included in the final object), there is no such risk.
> > Of course, I am aiming towards it, but the first step will be to change
> > the name of conflicting functions until we can pick qemu's implementation,
> > in which case the former will just go away.
> That is the point. We can't just pick qemu's implementation most of the
> times.
"until we can pick up qemu's implementation" potentially involves replacing
that particular piece with upstream version first.

> 
> > 
> > If we are serious about merging qemu-kvm into qemu, I don't see a way out
> > of it. We should start changing things this way to accomodate it. Different
> > enough won't do.
> I don't really like the idea to morph working implementation to look like
> non-working one. I do agree that qemu-kvm should be cleaned substantially
> before going upstream. Upstream qemu kvm should go away than. I don't
> see much work done to enhance it anyway.
> 

This first phase has nothing to do with functionality. To begin with,
KVMState is qemu style and kvm_context_t is not, like it or not (I don't).

I don't plan to introduce regressions, you can rest assured. But we _do_
have to make things look much more qemuer, and that's what this patch
aims at.


Gleb Natapov June 4, 2009, 8:17 p.m. UTC | #6
On Thu, Jun 04, 2009 at 05:18:06PM -0300, Glauber Costa wrote:
> > > this first phase has nothing to do with functionality. To begin with,
> > > KVMState is qemu style, kvm_context_t is not, like it or not (I don't).
> > > 
> > I am not against this mechanical change at all, don't get me wrong. I
> > don't want to mix two kvm implementation together in strange ways.
> > 
> too late for not wanting anything strange to happen ;-)
> 
You are right, I should have said "in stranger ways".

> But I do believe this is the way to turn qemu-kvm.git into something
> that feeds qemu.git. And that's what we all want.
Disagree with first part, agree with second :)

--
			Gleb.
Glauber Costa June 4, 2009, 8:18 p.m. UTC | #7
On Thu, Jun 04, 2009 at 11:09:52PM +0300, Gleb Natapov wrote:
> On Thu, Jun 04, 2009 at 05:10:51PM -0300, Glauber Costa wrote:
> > On Thu, Jun 04, 2009 at 11:00:46PM +0300, Gleb Natapov wrote:
> > > On Thu, Jun 04, 2009 at 04:33:19PM -0300, Glauber Costa wrote:
> > > > On Thu, Jun 04, 2009 at 10:23:29PM +0300, Gleb Natapov wrote:
> > > > > On Thu, Jun 04, 2009 at 02:23:03PM -0400, Glauber Costa wrote:
> > > > > > This is a pretty mechanical change. To make code look
> > > > > > closer to upstream qemu, I'm renaming kvm_context_t to
> > > > > > KVMState. Mid term goal here is to start sharing code
> > > > > > whereas possible.
> > > > > > 
> > > > > > Avi, please apply, or I'll send you a video of myself
> > > > > > dancing naked.
> > > > > > 
> > > > > You can start recording it since I doubt this patch will apply cleanly
> > > > > to today's master (other mechanical change was applied). Regardless, I
> > > > > think trying to use bits of qemu kvm is dangerous. It has similar function
> > > > > with same names, but with different assumptions about conditional they
> > > > > can be executed in (look at commit a5ddb119). I actually prefer to be
> > > > > different enough to not call upstream qemu function by mistake.
> > > > 
> > > > I did it against today's master. If new patches came in, is just
> > > > a matter of regenerating this, since it is, as I said, mechanical.
> > > > 
> > > > Also, as we don't compile in upstream functions yet (kvm-all.c and kvm.c
> > > > are not included in the final object), there is no such risk.
> > > > Of course, I am aiming towards it, but the first step will be to change
> > > > the name of conflicting functions until we can pick qemu's implementation,
> > > > in which case the former will just go away.
> > > That is the point. We can't just pick qemu's implementation most of the
> > > times.
> > "until we can pick up qemu's implementation" potentially involves replacing
> > that particular piece with upstream version first.
> > 
> > > 
> > > > 
> > > > If we are serious about merging qemu-kvm into qemu, I don't see a way out
> > > > of it. We should start changing things this way to accomodate it. Different
> > > > enough won't do.
> > > I don't really like the idea to morph working implementation to look like
> > > non-working one. I do agree that qemu-kvm should be cleaned substantially
> > > before going upstream. Upstream qemu kvm should go away than. I don't
> > > see much work done to enhance it anyway.
> > > 
> > 
> > this first phase has nothing to do with functionality. To begin with,
> > KVMState is qemu style, kvm_context_t is not, like it or not (I don't).
> > 
> I am not against this mechanical change at all, don't get me wrong. I
> don't want to mix two kvm implementation together in strange ways.
> 
too late for not wanting anything strange to happen ;-)

But I do believe this is the way to turn qemu-kvm.git into something
that feeds qemu.git. And that's what we all want.

Avi Kivity June 7, 2009, 9:21 a.m. UTC | #8
Glauber Costa wrote:
> This is a pretty mechanical change. To make code look
> closer to upstream qemu, I'm renaming kvm_context_t to
> KVMState. Mid term goal here is to start sharing code
> whereas possible.
>   

Doesn't apply.

Also, it's a little fishy.  We now have two KVMState types, but they're 
pretty different.

What we could do is embed a kvm_context_t variable in the real KVMState, 
and start using that.  Then, we could, function by function, use the 
upstream KVMState functionality and remove the corresponding 
kvm_context_t functionality.
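
A minimal sketch of that layout (the field names here are assumptions for
illustration, not code from either tree):

struct kvm_context;                  /* qemu-kvm's libkvm state, opaque here  */

typedef struct KVMState {
    int fd;                          /* /dev/kvm fd, as upstream already has  */
    int vmfd;                        /* VM fd                                 */
    struct kvm_context *kvm_context; /* embedded legacy state; shrinks as     */
                                     /* functions move over to the upstream   */
                                     /* helpers, then goes away entirely      */
} KVMState;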

> Avi, please apply, or I'll send you a video of myself
> dancing naked.
>   

The list has a size limit, so don't attach a huge video.  Post a URL 
instead.
Jan Kiszka June 7, 2009, 10:08 a.m. UTC | #9
Avi Kivity wrote:
> Glauber Costa wrote:
>> This is a pretty mechanical change. To make code look
>> closer to upstream qemu, I'm renaming kvm_context_t to
>> KVMState. Mid term goal here is to start sharing code
>> whereas possible.
>>   
> 
> Doesn't apply.
> 
> Also, it's a little fishy.  We now have two KVMState types, but they're
> pretty different.
> 
> What we could do is embed a kvm_context_t variable in the real KVMState,
> and start using that.  Then, we could, function by function, use the
> upstream KVMState functionality and remove the corresponding
> kvm_context_t functionality.

That would be great and was also what I had vaguely in mind. Likely
there are already fields that can be used from KVMState (fd and vmfd?).

> 
>> Avi, please apply, or I'll send you a video of myself
>> dancing naked.
>>   
> 
> The list has a size limit, so don't attach a huge video.  Post a URL
> instead.
> 

But only after 22:00 - in case anyone underage is reading.

Jan
Avi Kivity June 7, 2009, 10:10 a.m. UTC | #10
Jan Kiszka wrote:
>> What we could do is embed a kvm_context_t variable in the real KVMState,
>> and start using that.  Then, we could, function by function, use the
>> upstream KVMState functionality and remove the corresponding
>> kvm_context_t functionality.
>>     
>
> That would be great and was also what I had vaguely in mind. Likely
> there are already fields that can be used from KVMState (fd and vmfd?).
>   

Obvious candidates are to start using kvm_*_ioctl() everywhere.
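
A sketch of what that could look like for one of libkvm's raw ioctl()
wrappers; kvm_vcpu_ioctl() is upstream's per-vcpu helper, while the
vcpu_env() glue below is a hypothetical placeholder for however a vcpu
index would map to a CPUState:

/* sketch only, not code from either tree */
int kvm_get_regs(KVMState *kvm, int vcpu, struct kvm_regs *regs)
{
    /* was: return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs); */
    return kvm_vcpu_ioctl(vcpu_env(kvm, vcpu), KVM_GET_REGS, regs);
}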
diff mbox

Patch

diff --git a/kvm/user/main-ppc.c b/kvm/user/main-ppc.c
index 5af59f8..fbfd721 100644
--- a/kvm/user/main-ppc.c
+++ b/kvm/user/main-ppc.c
@@ -44,7 +44,7 @@  static int gettid(void)
 	return syscall(__NR_gettid);
 }
 
-kvm_context_t kvm;
+KVMState *kvm;
 
 #define IPI_SIGNAL (SIGRTMIN + 4)
 
diff --git a/kvm/user/main.c b/kvm/user/main.c
index 1530ae2..ecd1b28 100644
--- a/kvm/user/main.c
+++ b/kvm/user/main.c
@@ -50,7 +50,7 @@  static int tkill(int pid, int sig)
 	return syscall(__NR_tkill, pid, sig);
 }
 
-kvm_context_t kvm;
+KVMState *kvm;
 
 #define MAX_VCPUS 4
 
@@ -406,7 +406,7 @@  static void load_file(void *mem, const char *fname)
 	}
 }
 
-static void enter_32(kvm_context_t kvm)
+static void enter_32(KVMState *kvm)
 {
 	struct kvm_regs regs = {
 		.rsp = 0x80000,  /* 512KB */
diff --git a/libkvm-all.c b/libkvm-all.c
index 1668e32..30b5a6c 100644
--- a/libkvm-all.c
+++ b/libkvm-all.c
@@ -53,7 +53,7 @@ 
 int kvm_abi = EXPECTED_KVM_API_VERSION;
 int kvm_page_size;
 
-static inline void set_gsi(kvm_context_t kvm, unsigned int gsi)
+static inline void set_gsi(KVMState *kvm, unsigned int gsi)
 {
 	uint32_t *bitmap = kvm->used_gsi_bitmap;
 
@@ -63,7 +63,7 @@  static inline void set_gsi(kvm_context_t kvm, unsigned int gsi)
 		DPRINTF("Invalid GSI %d\n");
 }
 
-static inline void clear_gsi(kvm_context_t kvm, unsigned int gsi)
+static inline void clear_gsi(KVMState *kvm, unsigned int gsi)
 {
 	uint32_t *bitmap = kvm->used_gsi_bitmap;
 
@@ -91,7 +91,7 @@  static void init_slots(void)
 		slots[i].len = 0;
 }
 
-static int get_free_slot(kvm_context_t kvm)
+static int get_free_slot(KVMState *kvm)
 {
 	int i;
 	int tss_ext;
@@ -158,7 +158,7 @@  static int get_container_slot(uint64_t phys_addr, unsigned long size)
 	return -1;
 }
 
-int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr, unsigned long size)
+int kvm_is_containing_region(KVMState *kvm, unsigned long phys_addr, unsigned long size)
 {
 	int slot = get_container_slot(phys_addr, size);
 	if (slot == -1)
@@ -169,7 +169,7 @@  int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr, unsigne
 /* 
  * dirty pages logging control 
  */
-static int kvm_dirty_pages_log_change(kvm_context_t kvm,
+static int kvm_dirty_pages_log_change(KVMState *kvm,
 				      unsigned long phys_addr,
 				      unsigned flags,
 				      unsigned mask)
@@ -209,8 +209,8 @@  static int kvm_dirty_pages_log_change(kvm_context_t kvm,
 	return r;
 }
 
-static int kvm_dirty_pages_log_change_all(kvm_context_t kvm,
-					  int (*change)(kvm_context_t kvm,
+static int kvm_dirty_pages_log_change_all(KVMState *kvm,
+					  int (*change)(KVMState *kvm,
 							uint64_t start,
 							uint64_t len))
 {
@@ -223,7 +223,7 @@  static int kvm_dirty_pages_log_change_all(kvm_context_t kvm,
 	return r;
 }
 
-int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm,
+int kvm_dirty_pages_log_enable_slot(KVMState *kvm,
 				    uint64_t phys_addr,
 				    uint64_t len)
 {
@@ -243,7 +243,7 @@  int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm,
 					  KVM_MEM_LOG_DIRTY_PAGES);
 }
 
-int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
+int kvm_dirty_pages_log_disable_slot(KVMState *kvm,
 				     uint64_t phys_addr,
 				     uint64_t len)
 {
@@ -265,7 +265,7 @@  int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
 /**
  * Enable dirty page logging for all memory regions
  */
-int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
+int kvm_dirty_pages_log_enable_all(KVMState *kvm)
 {
 	if (kvm->dirty_pages_log_all)
 		return 0;
@@ -278,7 +278,7 @@  int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
  * Enable dirty page logging only for memory regions that were created with
  *     dirty logging enabled (disable for all other memory regions).
  */
-int kvm_dirty_pages_log_reset(kvm_context_t kvm)
+int kvm_dirty_pages_log_reset(KVMState *kvm)
 {
 	if (!kvm->dirty_pages_log_all)
 		return 0;
@@ -288,11 +288,11 @@  int kvm_dirty_pages_log_reset(kvm_context_t kvm)
 }
 
 
-kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
+KVMState *kvm_init(struct kvm_callbacks *callbacks,
 		       void *opaque)
 {
 	int fd;
-	kvm_context_t kvm;
+	KVMState *kvm;
 	int r, gsi_count;
 
 	fd = open("/dev/kvm", O_RDWR);
@@ -354,7 +354,7 @@  kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
 	return NULL;
 }
 
-void kvm_finalize(kvm_context_t kvm)
+void kvm_finalize(KVMState *kvm)
 {
     	if (kvm->vcpu_fd[0] != -1)
 		close(kvm->vcpu_fd[0]);
@@ -364,17 +364,17 @@  void kvm_finalize(kvm_context_t kvm)
 	free(kvm);
 }
 
-void kvm_disable_irqchip_creation(kvm_context_t kvm)
+void kvm_disable_irqchip_creation(KVMState *kvm)
 {
 	kvm->no_irqchip_creation = 1;
 }
 
-void kvm_disable_pit_creation(kvm_context_t kvm)
+void kvm_disable_pit_creation(KVMState *kvm)
 {
 	kvm->no_pit_creation = 1;
 }
 
-int kvm_create_vcpu(kvm_context_t kvm, int slot)
+int kvm_create_vcpu(KVMState *kvm, int slot)
 {
 	long mmap_size;
 	int r;
@@ -402,7 +402,7 @@  int kvm_create_vcpu(kvm_context_t kvm, int slot)
 	return 0;
 }
 
-int kvm_create_vm(kvm_context_t kvm)
+int kvm_create_vm(KVMState *kvm)
 {
 	int fd = kvm->fd;
 
@@ -425,7 +425,7 @@  int kvm_create_vm(kvm_context_t kvm)
 	return 0;
 }
 
-static int kvm_create_default_phys_mem(kvm_context_t kvm,
+static int kvm_create_default_phys_mem(KVMState *kvm,
 				       unsigned long phys_mem_bytes,
 				       void **vm_mem)
 {
@@ -440,7 +440,7 @@  static int kvm_create_default_phys_mem(kvm_context_t kvm,
 	return -1;
 }
 
-int kvm_check_extension(kvm_context_t kvm, int ext)
+int kvm_check_extension(KVMState *kvm, int ext)
 {
 	int ret;
 
@@ -450,7 +450,7 @@  int kvm_check_extension(kvm_context_t kvm, int ext)
 	return 0;
 }
 
-void kvm_create_irqchip(kvm_context_t kvm)
+void kvm_create_irqchip(KVMState *kvm)
 {
 	int r;
 
@@ -477,7 +477,7 @@  void kvm_create_irqchip(kvm_context_t kvm)
 #endif
 }
 
-int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
+int kvm_create(KVMState *kvm, unsigned long phys_mem_bytes, void **vm_mem)
 {
 	int r;
 	
@@ -497,7 +497,7 @@  int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
 }
 
 
-void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
+void *kvm_create_phys_mem(KVMState *kvm, unsigned long phys_start,
 			  unsigned long len, int log, int writable)
 {
 	int r;
@@ -543,7 +543,7 @@  void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
         return ptr;
 }
 
-int kvm_register_phys_mem(kvm_context_t kvm,
+int kvm_register_phys_mem(KVMState *kvm,
 			  unsigned long phys_start, void *userspace_addr,
 			  unsigned long len, int log)
 {
@@ -574,7 +574,7 @@  int kvm_register_phys_mem(kvm_context_t kvm,
 /* destroy/free a whole slot.
  * phys_start, len and slot are the params passed to kvm_create_phys_mem()
  */
-void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start, 
+void kvm_destroy_phys_mem(KVMState *kvm, unsigned long phys_start, 
 			  unsigned long len)
 {
 	int slot;
@@ -616,7 +616,7 @@  void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
 	free_slot(memory.slot);
 }
 
-void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr, unsigned long size)
+void kvm_unregister_memory_area(KVMState *kvm, uint64_t phys_addr, unsigned long size)
 {
 
 	int slot = get_container_slot(phys_addr, size);
@@ -628,7 +628,7 @@  void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr, unsigned
 	}
 }
 
-static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
+static int kvm_get_map(KVMState *kvm, int ioctl_num, int slot, void *buf)
 {
 	int r;
 	struct kvm_dirty_log log = {
@@ -643,7 +643,7 @@  static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
 	return 0;
 }
 
-int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
+int kvm_get_dirty_pages(KVMState *kvm, unsigned long phys_addr, void *buf)
 {
 	int slot;
 
@@ -651,7 +651,7 @@  int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
 	return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
 }
 
-int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
+int kvm_get_dirty_pages_range(KVMState *kvm, unsigned long phys_addr,
 			      unsigned long len, void *buf, void *opaque,
 			      int (*cb)(unsigned long start, unsigned long len,
 					void*bitmap, void *opaque))
@@ -676,7 +676,7 @@  int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
 
 #ifdef KVM_CAP_IRQCHIP
 
-int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status)
+int kvm_set_irq_level(KVMState *kvm, int irq, int level, int *status)
 {
 	struct kvm_irq_level event;
 	int r;
@@ -701,7 +701,7 @@  int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status)
 	return 1;
 }
 
-int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
+int kvm_get_irqchip(KVMState *kvm, struct kvm_irqchip *chip)
 {
 	int r;
 
@@ -715,7 +715,7 @@  int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
 	return r;
 }
 
-int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
+int kvm_set_irqchip(KVMState *kvm, struct kvm_irqchip *chip)
 {
 	int r;
 
@@ -731,7 +731,7 @@  int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
 
 #endif
 
-static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
+static int handle_io(KVMState *kvm, struct kvm_run *run, int vcpu)
 {
 	uint16_t addr = run->io.port;
 	int r;
@@ -786,7 +786,7 @@  static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
 	return 0;
 }
 
-int handle_debug(kvm_context_t kvm, int vcpu, void *env)
+int handle_debug(KVMState *kvm, int vcpu, void *env)
 {
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     struct kvm_run *run = kvm->run[vcpu];
@@ -797,38 +797,38 @@  int handle_debug(kvm_context_t kvm, int vcpu, void *env)
 #endif
 }
 
-int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
+int kvm_get_regs(KVMState *kvm, int vcpu, struct kvm_regs *regs)
 {
     return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs);
 }
 
-int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
+int kvm_set_regs(KVMState *kvm, int vcpu, struct kvm_regs *regs)
 {
     return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_REGS, regs);
 }
 
-int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
+int kvm_get_fpu(KVMState *kvm, int vcpu, struct kvm_fpu *fpu)
 {
     return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_FPU, fpu);
 }
 
-int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
+int kvm_set_fpu(KVMState *kvm, int vcpu, struct kvm_fpu *fpu)
 {
     return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_FPU, fpu);
 }
 
-int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
+int kvm_get_sregs(KVMState *kvm, int vcpu, struct kvm_sregs *sregs)
 {
     return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_SREGS, sregs);
 }
 
-int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
+int kvm_set_sregs(KVMState *kvm, int vcpu, struct kvm_sregs *sregs)
 {
     return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SREGS, sregs);
 }
 
 #ifdef KVM_CAP_MP_STATE
-int kvm_get_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
+int kvm_get_mpstate(KVMState *kvm, int vcpu, struct kvm_mp_state *mp_state)
 {
     int r;
 
@@ -838,7 +838,7 @@  int kvm_get_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
     return -ENOSYS;
 }
 
-int kvm_set_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
+int kvm_set_mpstate(KVMState *kvm, int vcpu, struct kvm_mp_state *mp_state)
 {
     int r;
 
@@ -849,7 +849,7 @@  int kvm_set_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
 }
 #endif
 
-static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
+static int handle_mmio(KVMState *kvm, struct kvm_run *kvm_run)
 {
 	unsigned long addr = kvm_run->mmio.phys_addr;
 	void *data = kvm_run->mmio.data;
@@ -866,58 +866,58 @@  static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
 					kvm_run->mmio.len);
 }
 
-int handle_io_window(kvm_context_t kvm)
+int handle_io_window(KVMState *kvm)
 {
 	return kvm->callbacks->io_window(kvm->opaque);
 }
 
-int handle_halt(kvm_context_t kvm, int vcpu)
+int handle_halt(KVMState *kvm, int vcpu)
 {
 	return kvm->callbacks->halt(kvm->opaque, vcpu);
 }
 
-int handle_shutdown(kvm_context_t kvm, void *env)
+int handle_shutdown(KVMState *kvm, void *env)
 {
 	return kvm->callbacks->shutdown(kvm->opaque, env);
 }
 
-int try_push_interrupts(kvm_context_t kvm)
+int try_push_interrupts(KVMState *kvm)
 {
 	return kvm->callbacks->try_push_interrupts(kvm->opaque);
 }
 
-static inline void push_nmi(kvm_context_t kvm)
+static inline void push_nmi(KVMState *kvm)
 {
 #ifdef KVM_CAP_USER_NMI
 	kvm->callbacks->push_nmi(kvm->opaque);
 #endif /* KVM_CAP_USER_NMI */
 }
 
-void post_kvm_run(kvm_context_t kvm, void *env)
+void post_kvm_run(KVMState *kvm, void *env)
 {
 	kvm->callbacks->post_kvm_run(kvm->opaque, env);
 }
 
-int pre_kvm_run(kvm_context_t kvm, void *env)
+int pre_kvm_run(KVMState *kvm, void *env)
 {
 	return kvm->callbacks->pre_kvm_run(kvm->opaque, env);
 }
 
-int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
+int kvm_get_interrupt_flag(KVMState *kvm, int vcpu)
 {
 	struct kvm_run *run = kvm->run[vcpu];
 
 	return run->if_flag;
 }
 
-int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
+int kvm_is_ready_for_interrupt_injection(KVMState *kvm, int vcpu)
 {
 	struct kvm_run *run = kvm->run[vcpu];
 
 	return run->ready_for_interrupt_injection;
 }
 
-int kvm_run(kvm_context_t kvm, int vcpu, void *env)
+int kvm_run(KVMState *kvm, int vcpu, void *env)
 {
 	int r;
 	int fd = kvm->vcpu_fd[vcpu];
@@ -1029,7 +1029,7 @@  more:
 	return r;
 }
 
-int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
+int kvm_inject_irq(KVMState *kvm, int vcpu, unsigned irq)
 {
 	struct kvm_interrupt intr;
 
@@ -1038,13 +1038,13 @@  int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
 }
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_set_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_guest_debug *dbg)
+int kvm_set_guest_debug(KVMState *kvm, int vcpu, struct kvm_guest_debug *dbg)
 {
 	return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_GUEST_DEBUG, dbg);
 }
 #endif
 
-int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
+int kvm_set_signal_mask(KVMState *kvm, int vcpu, const sigset_t *sigset)
 {
 	struct kvm_signal_mask *sigmask;
 	int r;
@@ -1068,17 +1068,17 @@  int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
 	return r;
 }
 
-int kvm_irqchip_in_kernel(kvm_context_t kvm)
+int kvm_irqchip_in_kernel(KVMState *kvm)
 {
 	return kvm->irqchip_in_kernel;
 }
 
-int kvm_pit_in_kernel(kvm_context_t kvm)
+int kvm_pit_in_kernel(KVMState *kvm)
 {
 	return kvm->pit_in_kernel;
 }
 
-int kvm_has_sync_mmu(kvm_context_t kvm)
+int kvm_has_sync_mmu(KVMState *kvm)
 {
         int r = 0;
 #ifdef KVM_CAP_SYNC_MMU
@@ -1087,7 +1087,7 @@  int kvm_has_sync_mmu(kvm_context_t kvm)
         return r;
 }
 
-int kvm_inject_nmi(kvm_context_t kvm, int vcpu)
+int kvm_inject_nmi(KVMState *kvm, int vcpu)
 {
 #ifdef KVM_CAP_USER_NMI
 	return ioctl(kvm->vcpu_fd[vcpu], KVM_NMI);
@@ -1096,7 +1096,7 @@  int kvm_inject_nmi(kvm_context_t kvm, int vcpu)
 #endif
 }
 
-int kvm_init_coalesced_mmio(kvm_context_t kvm)
+int kvm_init_coalesced_mmio(KVMState *kvm)
 {
 	int r = 0;
 	kvm->coalesced_mmio = 0;
@@ -1110,7 +1110,7 @@  int kvm_init_coalesced_mmio(kvm_context_t kvm)
 	return r;
 }
 
-int kvm_register_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t size)
+int kvm_register_coalesced_mmio(KVMState *kvm, uint64_t addr, uint32_t size)
 {
 #ifdef KVM_CAP_COALESCED_MMIO
 	struct kvm_coalesced_mmio_zone zone;
@@ -1132,7 +1132,7 @@  int kvm_register_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t size)
 	return -ENOSYS;
 }
 
-int kvm_unregister_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t size)
+int kvm_unregister_coalesced_mmio(KVMState *kvm, uint64_t addr, uint32_t size)
 {
 #ifdef KVM_CAP_COALESCED_MMIO
 	struct kvm_coalesced_mmio_zone zone;
@@ -1156,7 +1156,7 @@  int kvm_unregister_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t siz
 }
 
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
-int kvm_assign_pci_device(kvm_context_t kvm,
+int kvm_assign_pci_device(KVMState *kvm,
 			  struct kvm_assigned_pci_dev *assigned_dev)
 {
 	int ret;
@@ -1168,7 +1168,7 @@  int kvm_assign_pci_device(kvm_context_t kvm,
 	return ret;
 }
 
-static int kvm_old_assign_irq(kvm_context_t kvm,
+static int kvm_old_assign_irq(KVMState *kvm,
 		   struct kvm_assigned_irq *assigned_irq)
 {
 	int ret;
@@ -1181,7 +1181,7 @@  static int kvm_old_assign_irq(kvm_context_t kvm,
 }
 
 #ifdef KVM_CAP_ASSIGN_DEV_IRQ
-int kvm_assign_irq(kvm_context_t kvm,
+int kvm_assign_irq(KVMState *kvm,
 		   struct kvm_assigned_irq *assigned_irq)
 {
 	int ret;
@@ -1197,7 +1197,7 @@  int kvm_assign_irq(kvm_context_t kvm,
 	return kvm_old_assign_irq(kvm, assigned_irq);
 }
 
-int kvm_deassign_irq(kvm_context_t kvm,
+int kvm_deassign_irq(KVMState *kvm,
 		     struct kvm_assigned_irq *assigned_irq)
 {
 	int ret;
@@ -1209,7 +1209,7 @@  int kvm_deassign_irq(kvm_context_t kvm,
 	return ret;
 }
 #else
-int kvm_assign_irq(kvm_context_t kvm,
+int kvm_assign_irq(KVMState *kvm,
 		   struct kvm_assigned_irq *assigned_irq)
 {
 	return kvm_old_assign_irq(kvm, assigned_irq);
@@ -1218,7 +1218,7 @@  int kvm_assign_irq(kvm_context_t kvm,
 #endif
 
 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
-int kvm_deassign_pci_device(kvm_context_t kvm,
+int kvm_deassign_pci_device(KVMState *kvm,
 			    struct kvm_assigned_pci_dev *assigned_dev)
 {
 	int ret;
@@ -1231,7 +1231,7 @@  int kvm_deassign_pci_device(kvm_context_t kvm,
 }
 #endif
 
-int kvm_destroy_memory_region_works(kvm_context_t kvm)
+int kvm_destroy_memory_region_works(KVMState *kvm)
 {
 	int ret = 0;
 
@@ -1244,7 +1244,7 @@  int kvm_destroy_memory_region_works(kvm_context_t kvm)
 	return ret;
 }
 
-int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
+int kvm_reinject_control(KVMState *kvm, int pit_reinject)
 {
 #ifdef KVM_CAP_REINJECT_CONTROL
 	int r;
@@ -1263,7 +1263,7 @@  int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
 	return -ENOSYS;
 }
 
-int kvm_has_gsi_routing(kvm_context_t kvm)
+int kvm_has_gsi_routing(KVMState *kvm)
 {
     int r = 0;
 
@@ -1273,7 +1273,7 @@  int kvm_has_gsi_routing(kvm_context_t kvm)
     return r;
 }
 
-int kvm_get_gsi_count(kvm_context_t kvm)
+int kvm_get_gsi_count(KVMState *kvm)
 {
 #ifdef KVM_CAP_IRQ_ROUTING
 	return kvm_check_extension(kvm, KVM_CAP_IRQ_ROUTING);
@@ -1282,7 +1282,7 @@  int kvm_get_gsi_count(kvm_context_t kvm)
 #endif
 }
 
-int kvm_clear_gsi_routes(kvm_context_t kvm)
+int kvm_clear_gsi_routes(KVMState *kvm)
 {
 #ifdef KVM_CAP_IRQ_ROUTING
 	kvm->irq_routes->nr = 0;
@@ -1292,7 +1292,7 @@  int kvm_clear_gsi_routes(kvm_context_t kvm)
 #endif
 }
 
-int kvm_add_routing_entry(kvm_context_t kvm,
+int kvm_add_routing_entry(KVMState *kvm,
 		          struct kvm_irq_routing_entry* entry)
 {
 #ifdef KVM_CAP_IRQ_ROUTING
@@ -1328,7 +1328,7 @@  int kvm_add_routing_entry(kvm_context_t kvm,
 #endif
 }
 
-int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
+int kvm_add_irq_route(KVMState *kvm, int gsi, int irqchip, int pin)
 {
 #ifdef KVM_CAP_IRQ_ROUTING
 	struct kvm_irq_routing_entry e;
@@ -1344,7 +1344,7 @@  int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
 #endif
 }
 
-int kvm_del_routing_entry(kvm_context_t kvm,
+int kvm_del_routing_entry(KVMState *kvm,
 	                  struct kvm_irq_routing_entry* entry)
 {
 #ifdef KVM_CAP_IRQ_ROUTING
@@ -1408,7 +1408,7 @@  int kvm_del_routing_entry(kvm_context_t kvm,
 #endif
 }
 
-int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
+int kvm_del_irq_route(KVMState *kvm, int gsi, int irqchip, int pin)
 {
 #ifdef KVM_CAP_IRQ_ROUTING
 	struct kvm_irq_routing_entry e;
@@ -1424,7 +1424,7 @@  int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
 #endif
 }
 
-int kvm_commit_irq_routes(kvm_context_t kvm)
+int kvm_commit_irq_routes(KVMState *kvm)
 {
 #ifdef KVM_CAP_IRQ_ROUTING
 	int r;
@@ -1439,7 +1439,7 @@  int kvm_commit_irq_routes(kvm_context_t kvm)
 #endif
 }
 
-int kvm_get_irq_route_gsi(kvm_context_t kvm)
+int kvm_get_irq_route_gsi(KVMState *kvm)
 {
 	int i, bit;
 	uint32_t *buf = kvm->used_gsi_bitmap;
@@ -1457,7 +1457,7 @@  int kvm_get_irq_route_gsi(kvm_context_t kvm)
 }
 
 #ifdef KVM_CAP_DEVICE_MSIX
-int kvm_assign_set_msix_nr(kvm_context_t kvm,
+int kvm_assign_set_msix_nr(KVMState *kvm,
                            struct kvm_assigned_msix_nr *msix_nr)
 {
         int ret;
@@ -1469,7 +1469,7 @@  int kvm_assign_set_msix_nr(kvm_context_t kvm,
         return ret;
 }
 
-int kvm_assign_set_msix_entry(kvm_context_t kvm,
+int kvm_assign_set_msix_entry(KVMState *kvm,
                               struct kvm_assigned_msix_entry *entry)
 {
         int ret;
diff --git a/libkvm-all.h b/libkvm-all.h
index 4821a1e..d4ae12f 100644
--- a/libkvm-all.h
+++ b/libkvm-all.h
@@ -21,12 +21,12 @@ 
 
 struct kvm_context;
 
-typedef struct kvm_context *kvm_context_t;
+typedef struct kvm_context KVMState;
 
 #if defined(__x86_64__) || defined(__i386__)
-struct kvm_msr_list *kvm_get_msr_list(kvm_context_t);
-int kvm_get_msrs(kvm_context_t, int vcpu, struct kvm_msr_entry *msrs, int n);
-int kvm_set_msrs(kvm_context_t, int vcpu, struct kvm_msr_entry *msrs, int n);
+struct kvm_msr_list *kvm_get_msr_list(KVMState *);
+int kvm_get_msrs(KVMState *, int vcpu, struct kvm_msr_entry *msrs, int n);
+int kvm_set_msrs(KVMState *, int vcpu, struct kvm_msr_entry *msrs, int n);
 #endif
 
 /*!
@@ -80,9 +80,9 @@  struct kvm_callbacks {
     int (*powerpc_dcr_write)(int vcpu, uint32_t dcrn, uint32_t data);
 #endif
 #if defined(__s390__)
-    int (*s390_handle_intercept)(kvm_context_t context, int vcpu,
+    int (*s390_handle_intercept)(KVMState *context, int vcpu,
 	struct kvm_run *run);
-    int (*s390_handle_reset)(kvm_context_t context, int vcpu,
+    int (*s390_handle_reset)(KVMState *context, int vcpu,
 	 struct kvm_run *run);
 #endif
 };
@@ -98,7 +98,7 @@  struct kvm_callbacks {
  * \param opaque Not used
  * \return NULL on failure
  */
-kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
+KVMState *kvm_init(struct kvm_callbacks *callbacks,
 		       void *opaque);
 
 /*!
@@ -110,7 +110,7 @@  kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
  *
  * \param kvm Pointer to the kvm_context that is to be freed
  */
-void kvm_finalize(kvm_context_t kvm);
+void kvm_finalize(KVMState *kvm);
 
 /*!
  * \brief Disable the in-kernel IRQCHIP creation
@@ -120,7 +120,7 @@  void kvm_finalize(kvm_context_t kvm);
  *
  * \param kvm Pointer to the kvm_context
  */
-void kvm_disable_irqchip_creation(kvm_context_t kvm);
+void kvm_disable_irqchip_creation(KVMState *kvm);
 
 /*!
  * \brief Disable the in-kernel PIT creation
@@ -130,7 +130,7 @@  void kvm_disable_irqchip_creation(kvm_context_t kvm);
  *
  *  \param kvm Pointer to the kvm_context
  */
-void kvm_disable_pit_creation(kvm_context_t kvm);
+void kvm_disable_pit_creation(KVMState *kvm);
 
 /*!
  * \brief Create new virtual machine
@@ -146,12 +146,12 @@  void kvm_disable_pit_creation(kvm_context_t kvm);
  * kvm_create allocates for physical RAM
  * \return 0 on success
  */
-int kvm_create(kvm_context_t kvm,
+int kvm_create(KVMState *kvm,
 	       unsigned long phys_mem_bytes,
 	       void **phys_mem);
-int kvm_create_vm(kvm_context_t kvm);
-int kvm_check_extension(kvm_context_t kvm, int ext);
-void kvm_create_irqchip(kvm_context_t kvm);
+int kvm_create_vm(KVMState *kvm);
+int kvm_check_extension(KVMState *kvm, int ext);
+void kvm_create_irqchip(KVMState *kvm);
 
 /*!
  * \brief Create a new virtual cpu
@@ -163,7 +163,7 @@  void kvm_create_irqchip(kvm_context_t kvm);
  * \param slot vcpu number (> 0)
  * \return 0 on success, -errno on failure
  */
-int kvm_create_vcpu(kvm_context_t kvm, int slot);
+int kvm_create_vcpu(KVMState *kvm, int slot);
 
 /*!
  * \brief Start the VCPU
@@ -186,7 +186,7 @@  int kvm_create_vcpu(kvm_context_t kvm, int slot);
  * return except for when an error has occured, or when you have sent it
  * an EINTR signal.
  */
-int kvm_run(kvm_context_t kvm, int vcpu, void *env);
+int kvm_run(KVMState *kvm, int vcpu, void *env);
 
 /*!
  * \brief Get interrupt flag from on last exit to userspace
@@ -197,7 +197,7 @@  int kvm_run(kvm_context_t kvm, int vcpu, void *env);
  * \param vcpu Which virtual CPU should get dumped
  * \return interrupt flag value (0 or 1)
  */
-int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu);
+int kvm_get_interrupt_flag(KVMState *kvm, int vcpu);
 
 /*!
  * \brief Get the value of the APIC_BASE msr as of last exit to userspace
@@ -208,7 +208,7 @@  int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu);
  * \param vcpu Which virtual CPU should get dumped
  * \return APIC_BASE msr contents
  */
-uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu);
+uint64_t kvm_get_apic_base(KVMState *kvm, int vcpu);
 
 /*!
  * \brief Check if a vcpu is ready for interrupt injection
@@ -219,7 +219,7 @@  uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu);
  * \param vcpu Which virtual CPU should get dumped
  * \return boolean indicating interrupt injection readiness
  */
-int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu);
+int kvm_is_ready_for_interrupt_injection(KVMState *kvm, int vcpu);
 
 /*!
  * \brief Read VCPU registers
@@ -236,7 +236,7 @@  int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu);
  * registers values
  * \return 0 on success
  */
-int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
+int kvm_get_regs(KVMState *kvm, int vcpu, struct kvm_regs *regs);
 
 /*!
  * \brief Write VCPU registers
@@ -251,7 +251,7 @@  int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
  * registers values
  * \return 0 on success
  */
-int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
+int kvm_set_regs(KVMState *kvm, int vcpu, struct kvm_regs *regs);
 /*!
  * \brief Read VCPU fpu registers
  *
@@ -267,7 +267,7 @@  int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
  * fpu registers values
  * \return 0 on success
  */
-int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
+int kvm_get_fpu(KVMState *kvm, int vcpu, struct kvm_fpu *fpu);
 
 /*!
  * \brief Write VCPU fpu registers
@@ -281,7 +281,7 @@  int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
  * \param fpu Pointer to a kvm_fpu which holds the new vcpu fpu state
  * \return 0 on success
  */
-int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
+int kvm_set_fpu(KVMState *kvm, int vcpu, struct kvm_fpu *fpu);
 
 /*!
  * \brief Read VCPU system registers
@@ -299,7 +299,7 @@  int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
  * registers values
  * \return 0 on success
  */
-int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
+int kvm_get_sregs(KVMState *kvm, int vcpu, struct kvm_sregs *regs);
 
 /*!
  * \brief Write VCPU system registers
@@ -314,27 +314,27 @@  int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
  * registers values
  * \return 0 on success
  */
-int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
+int kvm_set_sregs(KVMState *kvm, int vcpu, struct kvm_sregs *regs);
 
 #ifdef KVM_CAP_MP_STATE
 /*!
  *  * \brief Read VCPU MP state
  *
  */
-int kvm_get_mpstate(kvm_context_t kvm, int vcpu,
+int kvm_get_mpstate(KVMState *kvm, int vcpu,
                     struct kvm_mp_state *mp_state);
 
 /*!
  *  * \brief Write VCPU MP state
  *
  */
-int kvm_set_mpstate(kvm_context_t kvm, int vcpu,
+int kvm_set_mpstate(KVMState *kvm, int vcpu,
                     struct kvm_mp_state *mp_state);
 /*!
  *  * \brief Reset VCPU MP state
  *
  */
-static inline int kvm_reset_mpstate(kvm_context_t kvm, int vcpu)
+static inline int kvm_reset_mpstate(KVMState *kvm, int vcpu)
 {
     struct kvm_mp_state mp_state = {.mp_state = KVM_MP_STATE_UNINITIALIZED};
     return kvm_set_mpstate(kvm, vcpu, &mp_state);
@@ -351,10 +351,10 @@  static inline int kvm_reset_mpstate(kvm_context_t kvm, int vcpu)
  * \param irq Vector number
  * \return 0 on success
  */
-int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq);
+int kvm_inject_irq(KVMState *kvm, int vcpu, unsigned irq);
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_set_guest_debug(kvm_context_t, int vcpu, struct kvm_guest_debug *dbg);
+int kvm_set_guest_debug(KVMState *, int vcpu, struct kvm_guest_debug *dbg);
 #endif
 
 #if defined(__i386__) || defined(__x86_64__)
@@ -369,7 +369,7 @@  int kvm_set_guest_debug(kvm_context_t, int vcpu, struct kvm_guest_debug *dbg);
  * \param entries cpuid function entries table
  * \return 0 on success, or -errno on error
  */
-int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
+int kvm_setup_cpuid(KVMState *kvm, int vcpu, int nent,
 		    struct kvm_cpuid_entry *entries);
 
 /*!
@@ -385,7 +385,7 @@  int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
  * \param entries cpuid function entries table
  * \return 0 on success, or -errno on error
  */
-int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
+int kvm_setup_cpuid2(KVMState *kvm, int vcpu, int nent,
 		     struct kvm_cpuid_entry2 *entries);
 
 /*!
@@ -394,7 +394,7 @@  int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
  * \param kvm pointer to kvm_context
  * \param nrshadow_pages number of pages to be allocated
  */
-int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages);
+int kvm_set_shadow_pages(KVMState *kvm, unsigned int nrshadow_pages);
 
 /*!
  * \brief Getting the number of shadow pages that are allocated to the vm
@@ -402,7 +402,7 @@  int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages);
  * \param kvm pointer to kvm_context
  * \param nrshadow_pages number of pages to be allocated
  */
-int kvm_get_shadow_pages(kvm_context_t kvm , unsigned int *nrshadow_pages);
+int kvm_get_shadow_pages(KVMState *kvm , unsigned int *nrshadow_pages);
 
 /*!
  * \brief Set up cr8 for next time the vcpu is executed
@@ -414,7 +414,7 @@  int kvm_get_shadow_pages(kvm_context_t kvm , unsigned int *nrshadow_pages);
  * \param vcpu Which virtual CPU should get dumped
  * \param cr8 next cr8 value
  */
-void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8);
+void kvm_set_cr8(KVMState *kvm, int vcpu, uint64_t cr8);
 
 /*!
  * \brief Get cr8 for sync tpr in qemu apic emulation
@@ -425,7 +425,7 @@  void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8);
  * \param kvm Pointer to the current kvm_context
  * \param vcpu Which virtual CPU should get dumped
  */
-__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu);
+__u64 kvm_get_cr8(KVMState *kvm, int vcpu);
 #endif
 
 /*!
@@ -441,7 +441,7 @@  __u64 kvm_get_cr8(kvm_context_t kvm, int vcpu);
  * \param sigset signal mask for guest mode
  * \return 0 on success, or -errno on error
  */
-int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset);
+int kvm_set_signal_mask(KVMState *kvm, int vcpu, const sigset_t *sigset);
 
 /*!
  * \brief Dump all VCPU information
@@ -457,7 +457,7 @@  int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset);
  * \param vcpu Which virtual CPU should get dumped
  * \return 0 on success
  */
-int kvm_dump_vcpu(kvm_context_t kvm, int vcpu);
+int kvm_dump_vcpu(KVMState *kvm, int vcpu);
 
 /*!
  * \brief Dump VCPU registers
@@ -471,28 +471,28 @@  int kvm_dump_vcpu(kvm_context_t kvm, int vcpu);
  * \param vcpu Which virtual CPU should get dumped
  * \return 0 on success
  */
-void kvm_show_regs(kvm_context_t kvm, int vcpu);
+void kvm_show_regs(KVMState *kvm, int vcpu);
 
 
-void *kvm_create_phys_mem(kvm_context_t, unsigned long phys_start, 
+void *kvm_create_phys_mem(KVMState *, unsigned long phys_start, 
 			  unsigned long len, int log, int writable);
-void kvm_destroy_phys_mem(kvm_context_t, unsigned long phys_start, 
+void kvm_destroy_phys_mem(KVMState *, unsigned long phys_start, 
 			  unsigned long len);
-void kvm_unregister_memory_area(kvm_context_t, uint64_t phys_start,
+void kvm_unregister_memory_area(KVMState *, uint64_t phys_start,
                                 unsigned long len);
 
-int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_start, unsigned long size);
-int kvm_register_phys_mem(kvm_context_t kvm,
+int kvm_is_containing_region(KVMState *kvm, unsigned long phys_start, unsigned long size);
+int kvm_register_phys_mem(KVMState *kvm,
 			unsigned long phys_start, void *userspace_addr,
 			unsigned long len, int log);
-int kvm_get_dirty_pages(kvm_context_t, unsigned long phys_addr, void *buf);
-int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
+int kvm_get_dirty_pages(KVMState *, unsigned long phys_addr, void *buf);
+int kvm_get_dirty_pages_range(KVMState *kvm, unsigned long phys_addr,
 			      unsigned long end_addr, void *buf, void*opaque,
 			      int (*cb)(unsigned long start, unsigned long len,
 					void*bitmap, void *opaque));
-int kvm_register_coalesced_mmio(kvm_context_t kvm,
+int kvm_register_coalesced_mmio(KVMState *kvm,
 				uint64_t addr, uint32_t size);
-int kvm_unregister_coalesced_mmio(kvm_context_t kvm,
+int kvm_unregister_coalesced_mmio(KVMState *kvm,
 				  uint64_t addr, uint32_t size);
 
 /*!
@@ -502,7 +502,7 @@  int kvm_unregister_coalesced_mmio(kvm_context_t kvm,
  * accesses the alias region, it will behave exactly as if it accessed
  * the target memory.
  */
-int kvm_create_memory_alias(kvm_context_t,
+int kvm_create_memory_alias(KVMState *,
 			    uint64_t phys_start, uint64_t len,
 			    uint64_t target_phys);
 
@@ -511,7 +511,7 @@  int kvm_create_memory_alias(kvm_context_t,
  *
  * Removes an alias created with kvm_create_memory_alias().
  */
-int kvm_destroy_memory_alias(kvm_context_t, uint64_t phys_start);
+int kvm_destroy_memory_alias(KVMState *, uint64_t phys_start);
 
 /*!
  * \brief Get a bitmap of guest ram pages which are allocated to the guest.
@@ -520,17 +520,17 @@  int kvm_destroy_memory_alias(kvm_context_t, uint64_t phys_start);
  * \param phys_addr Memory slot phys addr
  * \param bitmap Long aligned address of a big enough bitmap (one bit per page)
  */
-int kvm_get_mem_map(kvm_context_t kvm, unsigned long phys_addr, void *bitmap);
-int kvm_get_mem_map_range(kvm_context_t kvm, unsigned long phys_addr,
+int kvm_get_mem_map(KVMState *kvm, unsigned long phys_addr, void *bitmap);
+int kvm_get_mem_map_range(KVMState *kvm, unsigned long phys_addr,
 			   unsigned long len, void *buf, void *opaque,
 			   int (*cb)(unsigned long start,unsigned long len,
 				     void* bitmap, void* opaque));
-int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status);
+int kvm_set_irq_level(KVMState *kvm, int irq, int level, int *status);
 
-int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm,
+int kvm_dirty_pages_log_enable_slot(KVMState *kvm,
 				    uint64_t phys_start,
 				    uint64_t len);
-int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
+int kvm_dirty_pages_log_disable_slot(KVMState *kvm,
 				     uint64_t phys_start,
 				     uint64_t len);
 /*!
@@ -538,7 +538,7 @@  int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_dirty_pages_log_enable_all(kvm_context_t kvm);
+int kvm_dirty_pages_log_enable_all(KVMState *kvm);
 
 /*!
  * \brief Disable dirty-page-logging for some memory regions
@@ -548,16 +548,16 @@  int kvm_dirty_pages_log_enable_all(kvm_context_t kvm);
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_dirty_pages_log_reset(kvm_context_t kvm);
+int kvm_dirty_pages_log_reset(KVMState *kvm);
 
 /*!
  * \brief Query whether in kernel irqchip is used
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_irqchip_in_kernel(kvm_context_t kvm);
+int kvm_irqchip_in_kernel(KVMState *kvm);
 
-int kvm_has_sync_mmu(kvm_context_t kvm);
+int kvm_has_sync_mmu(KVMState *kvm);
 
 #ifdef KVM_CAP_IRQCHIP
 /*!
@@ -569,7 +569,7 @@  int kvm_has_sync_mmu(kvm_context_t kvm);
  * \param kvm Pointer to the current kvm_context
  * \param chip The irq chip device to be dumped
  */
-int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
+int kvm_get_irqchip(KVMState *kvm, struct kvm_irqchip *chip);
 
 /*!
  * \brief Set in kernel IRQCHIP contents
@@ -581,7 +581,7 @@  int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
  * \param kvm Pointer to the current kvm_context
  * \param chip The irq chip device to be written
  */
-int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
+int kvm_set_irqchip(KVMState *kvm, struct kvm_irqchip *chip);
 
 #if defined(__i386__) || defined(__x86_64__)
 /*!
@@ -593,7 +593,7 @@  int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
  * \param vcpu Which virtual CPU should be accessed
  * \param s Local apic state of the specific virtual CPU
  */
-int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
+int kvm_get_lapic(KVMState *kvm, int vcpu, struct kvm_lapic_state *s);
 
 /*!
  * \brief Set in kernel local APIC for vcpu
@@ -604,7 +604,7 @@  int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
  * \param vcpu Which virtual CPU should be accessed
  * \param s Local apic state of the specific virtual CPU
  */
-int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
+int kvm_set_lapic(KVMState *kvm, int vcpu, struct kvm_lapic_state *s);
 
 #endif
 
@@ -617,7 +617,7 @@  int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
  * \param vcpu Which virtual CPU should get dumped
  * \return 0 on success
  */
-int kvm_inject_nmi(kvm_context_t kvm, int vcpu);
+int kvm_inject_nmi(KVMState *kvm, int vcpu);
 
 #endif
 
@@ -626,7 +626,7 @@  int kvm_inject_nmi(kvm_context_t kvm, int vcpu);
  *
  *  \param kvm Pointer to the current kvm_context
  */
-int kvm_pit_in_kernel(kvm_context_t kvm);
+int kvm_pit_in_kernel(KVMState *kvm);
 
 /*!
  * \brief Initialize coalesced MMIO
@@ -635,7 +635,7 @@  int kvm_pit_in_kernel(kvm_context_t kvm);
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_init_coalesced_mmio(kvm_context_t kvm);
+int kvm_init_coalesced_mmio(KVMState *kvm);
 
 #ifdef KVM_CAP_PIT
 
@@ -648,7 +648,7 @@  int kvm_init_coalesced_mmio(kvm_context_t kvm);
  * \param kvm Pointer to the current kvm_context
  * \param s PIT state of the virtual domain
  */
-int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s);
+int kvm_get_pit(KVMState *kvm, struct kvm_pit_state *s);
 
 /*!
  * \brief Set in kernel PIT of the virtual domain
@@ -659,10 +659,10 @@  int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s);
  * \param kvm Pointer to the current kvm_context
  * \param s PIT state of the virtual domain
  */
-int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s);
+int kvm_set_pit(KVMState *kvm, struct kvm_pit_state *s);
 #endif
 
-int kvm_reinject_control(kvm_context_t kvm, int pit_reinject);
+int kvm_reinject_control(KVMState *kvm, int pit_reinject);
 
 #endif
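
For reference, a minimal usage sketch of the in-kernel PIT accessors declared
just above (illustrative only, not part of the patch; it assumes these
prototypes live in libkvm-all.h and that struct kvm_pit_state comes in via
the kernel headers):

#include "libkvm-all.h"

/* Illustrative sketch: snapshot the in-kernel PIT state and write it back,
 * guarded by kvm_pit_in_kernel(). */
static void pit_save_restore_example(KVMState *kvm)
{
	struct kvm_pit_state pit;

	if (!kvm_pit_in_kernel(kvm))
		return;
	if (kvm_get_pit(kvm, &pit) == 0) {
		/* ... machine reset / migration would happen here ... */
		kvm_set_pit(kvm, &pit);
	}
}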
 
@@ -677,7 +677,7 @@  int kvm_reinject_control(kvm_context_t kvm, int pit_reinject);
  * \param kvm Pointer to the current kvm_context
  * \param vcpu vcpu to enable tpr access reporting on
  */
-int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
+int kvm_enable_tpr_access_reporting(KVMState *kvm, int vcpu);
 
 /*!
  * \brief Disable kernel tpr access reporting
@@ -687,18 +687,18 @@  int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
  * \param kvm Pointer to the current kvm_context
  * \param vcpu vcpu to disable tpr access reporting on
  */
-int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
+int kvm_disable_tpr_access_reporting(KVMState *kvm, int vcpu);
 
-int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic);
+int kvm_enable_vapic(KVMState *kvm, int vcpu, uint64_t vapic);
 
 #endif
 
 #if defined(__s390__)
-int kvm_s390_initial_reset(kvm_context_t kvm, int slot);
-int kvm_s390_interrupt(kvm_context_t kvm, int slot,
+int kvm_s390_initial_reset(KVMState *kvm, int slot);
+int kvm_s390_interrupt(KVMState *kvm, int slot,
 	struct kvm_s390_interrupt *kvmint);
-int kvm_s390_set_initial_psw(kvm_context_t kvm, int slot, psw_t psw);
-int kvm_s390_store_status(kvm_context_t kvm, int slot, unsigned long addr);
+int kvm_s390_set_initial_psw(KVMState *kvm, int slot, psw_t psw);
+int kvm_s390_store_status(KVMState *kvm, int slot, unsigned long addr);
 #endif
 
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
@@ -711,7 +711,7 @@  int kvm_s390_store_status(kvm_context_t kvm, int slot, unsigned long addr);
  * \param kvm Pointer to the current kvm_context
  * \param assigned_dev Parameters, like bus, devfn number, etc
  */
-int kvm_assign_pci_device(kvm_context_t kvm,
+int kvm_assign_pci_device(KVMState *kvm,
 			  struct kvm_assigned_pci_dev *assigned_dev);
 
 /*!
@@ -723,7 +723,7 @@  int kvm_assign_pci_device(kvm_context_t kvm,
  * \param kvm Pointer to the current kvm_context
  * \param assigned_irq Parameters, like dev id, host irq, guest irq, etc
  */
-int kvm_assign_irq(kvm_context_t kvm,
+int kvm_assign_irq(KVMState *kvm,
 		   struct kvm_assigned_irq *assigned_irq);
 
 #ifdef KVM_CAP_ASSIGN_DEV_IRQ
@@ -736,7 +736,7 @@  int kvm_assign_irq(kvm_context_t kvm,
  * \param kvm Pointer to the current kvm_context
  * \param assigned_irq Parameters, like dev id, host irq, guest irq, etc
  */
-int kvm_deassign_irq(kvm_context_t kvm,
+int kvm_deassign_irq(KVMState *kvm,
                    struct kvm_assigned_irq *assigned_irq);
 #endif
 #endif
@@ -748,7 +748,7 @@  int kvm_deassign_irq(kvm_context_t kvm,
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_destroy_memory_region_works(kvm_context_t kvm);
+int kvm_destroy_memory_region_works(KVMState *kvm);
 
 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
 /*!
@@ -760,7 +760,7 @@  int kvm_destroy_memory_region_works(kvm_context_t kvm);
  * \param kvm Pointer to the current kvm_context
  * \param assigned_dev Parameters, like bus, devfn number, etc
  */
-int kvm_deassign_pci_device(kvm_context_t kvm,
+int kvm_deassign_pci_device(KVMState *kvm,
 			    struct kvm_assigned_pci_dev *assigned_dev);
 #endif
 
@@ -772,7 +772,7 @@  int kvm_deassign_pci_device(kvm_context_t kvm,
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_has_gsi_routing(kvm_context_t kvm);
+int kvm_has_gsi_routing(KVMState *kvm);
 
 /*!
  * \brief Determines the number of gsis that can be routed
@@ -783,7 +783,7 @@  int kvm_has_gsi_routing(kvm_context_t kvm);
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_get_gsi_count(kvm_context_t kvm);
+int kvm_get_gsi_count(KVMState *kvm);
 
 /*!
  * \brief Clears the temporary irq routing table
@@ -793,7 +793,7 @@  int kvm_get_gsi_count(kvm_context_t kvm);
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_clear_gsi_routes(kvm_context_t kvm);
+int kvm_clear_gsi_routes(KVMState *kvm);
 
 /*!
  * \brief Adds an irq route to the temporary irq routing table
@@ -803,7 +803,7 @@  int kvm_clear_gsi_routes(kvm_context_t kvm);
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
+int kvm_add_irq_route(KVMState *kvm, int gsi, int irqchip, int pin);
 
 /*!
  * \brief Removes an irq route from the temporary irq routing table
@@ -813,7 +813,7 @@  int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
+int kvm_del_irq_route(KVMState *kvm, int gsi, int irqchip, int pin);
 
 struct kvm_irq_routing_entry;
 /*!
@@ -824,7 +824,7 @@  struct kvm_irq_routing_entry;
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_add_routing_entry(kvm_context_t kvm,
+int kvm_add_routing_entry(KVMState *kvm,
                           struct kvm_irq_routing_entry* entry);
 
 /*!
@@ -835,7 +835,7 @@  int kvm_add_routing_entry(kvm_context_t kvm,
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_del_routing_entry(kvm_context_t kvm,
+int kvm_del_routing_entry(KVMState *kvm,
 		          struct kvm_irq_routing_entry* entry);
 
 /*!
@@ -845,7 +845,7 @@  int kvm_del_routing_entry(kvm_context_t kvm,
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_commit_irq_routes(kvm_context_t kvm);
+int kvm_commit_irq_routes(KVMState *kvm);
 
 /*!
  * \brief Get unused GSI number for irq routing table
@@ -854,15 +854,15 @@  int kvm_commit_irq_routes(kvm_context_t kvm);
  *
  * \param kvm Pointer to the current kvm_context
  */
-int kvm_get_irq_route_gsi(kvm_context_t kvm);
+int kvm_get_irq_route_gsi(KVMState *kvm);
 
 #ifdef KVM_CAP_DEVICE_MSIX
-int kvm_assign_set_msix_nr(kvm_context_t kvm,
+int kvm_assign_set_msix_nr(KVMState *kvm,
 			   struct kvm_assigned_msix_nr *msix_nr);
-int kvm_assign_set_msix_entry(kvm_context_t kvm,
+int kvm_assign_set_msix_entry(KVMState *kvm,
                               struct kvm_assigned_msix_entry *entry);
 #endif
 
-uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg);
+uint32_t kvm_get_supported_cpuid(KVMState *kvm, uint32_t function, int reg);
 
 #endif
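
The GSI routing entry points above are two-phase: routes go into a temporary
table and only reach the kernel on commit.  A hedged usage sketch, illustrative
only and not part of the patch; the helper name is invented and the irqchip/pin
values are whatever the caller has at hand:

#include "libkvm-all.h"

/* Hypothetical helper: route a currently unused GSI to an irqchip pin. */
static int route_free_gsi_example(KVMState *kvm, int irqchip, int pin)
{
	int gsi = kvm_get_irq_route_gsi(kvm);		/* pick an unused GSI */

	if (gsi < 0)
		return gsi;
	if (kvm_add_irq_route(kvm, gsi, irqchip, pin) < 0)	/* stage the route */
		return -1;
	return kvm_commit_irq_routes(kvm);		/* flush staged routes */
}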
diff --git a/libkvm-common.h b/libkvm-common.h
index c95c591..ad981b3 100644
--- a/libkvm-common.h
+++ b/libkvm-common.h
@@ -71,24 +71,24 @@  struct kvm_context {
 	int max_gsi;
 };
 
-int kvm_alloc_kernel_memory(kvm_context_t kvm, unsigned long memory,
+int kvm_alloc_kernel_memory(KVMState *kvm, unsigned long memory,
 								void **vm_mem);
-int kvm_alloc_userspace_memory(kvm_context_t kvm, unsigned long memory,
+int kvm_alloc_userspace_memory(KVMState *kvm, unsigned long memory,
 								void **vm_mem);
 
-int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+int kvm_arch_create(KVMState *kvm, unsigned long phys_mem_bytes,
                         void **vm_mem);
-int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu);
+int kvm_arch_run(struct kvm_run *run, KVMState *kvm, int vcpu);
 
 
-void kvm_show_code(kvm_context_t kvm, int vcpu);
+void kvm_show_code(KVMState *kvm, int vcpu);
 
-int handle_halt(kvm_context_t kvm, int vcpu);
-int handle_shutdown(kvm_context_t kvm, void *env);
-void post_kvm_run(kvm_context_t kvm, void *env);
-int pre_kvm_run(kvm_context_t kvm, void *env);
-int handle_io_window(kvm_context_t kvm);
-int handle_debug(kvm_context_t kvm, int vcpu, void *env);
-int try_push_interrupts(kvm_context_t kvm);
+int handle_halt(KVMState *kvm, int vcpu);
+int handle_shutdown(KVMState *kvm, void *env);
+void post_kvm_run(KVMState *kvm, void *env);
+int pre_kvm_run(KVMState *kvm, void *env);
+int handle_io_window(KVMState *kvm);
+int handle_debug(KVMState *kvm, int vcpu, void *env);
+int try_push_interrupts(KVMState *kvm);
 
 #endif
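
The declarations above show the whole substance of the change: only the
spelling of the context type moves.  A before/after sketch; the typedef itself
is not quoted in these hunks, so treat the exact typedef lines as an
assumption:

/* before: callers held an opaque pointer typedef */
typedef struct kvm_context *kvm_context_t;
int kvm_irqchip_in_kernel(kvm_context_t kvm);

/* after: the struct takes the upstream name and the pointer is explicit */
typedef struct kvm_context KVMState;
int kvm_irqchip_in_kernel(KVMState *kvm);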
diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
index 1096e65..9e8810a 100644
--- a/qemu-kvm-x86.c
+++ b/qemu-kvm-x86.c
@@ -498,7 +498,7 @@  struct kvm_para_features {
 	{ -1, -1 }
 };
 
-static int get_para_features(kvm_context_t kvm_context)
+static int get_para_features(KVMState *kvm_context)
 {
 	int i, features = 0;
 
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 68d3b92..a0846e8 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -33,7 +33,7 @@  int kvm_irqchip = 1;
 int kvm_pit = 1;
 int kvm_pit_reinject = 1;
 int kvm_nested = 0;
-kvm_context_t kvm_context;
+KVMState *kvm_context;
 
 pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
 pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 725589b..a470f3c 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -154,7 +154,7 @@  extern int kvm_irqchip;
 extern int kvm_pit;
 extern int kvm_pit_reinject;
 extern int kvm_nested;
-extern kvm_context_t kvm_context;
+extern KVMState *kvm_context;
 
 struct ioperm_data {
     unsigned long start_port;
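
Call sites do not change, since only the declared type of the global moves.
A hypothetical caller, assuming the libkvm prototypes are visible through
qemu-kvm.h:

#include "qemu-kvm.h"

/* kvm_context is now a KVMState *, but the call itself is untouched. */
static int using_in_kernel_irqchip(void)
{
	return kvm_irqchip_in_kernel(kvm_context);
}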
diff --git a/target-i386/libkvm.c b/target-i386/libkvm.c
index 32d03f1..b64e632 100644
--- a/target-i386/libkvm.c
+++ b/target-i386/libkvm.c
@@ -12,7 +12,7 @@ 
 #include <fcntl.h>
 #include <stdlib.h>
 
-int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
+int kvm_set_tss_addr(KVMState *kvm, unsigned long addr)
 {
 #ifdef KVM_CAP_SET_TSS_ADDR
 	int r;
@@ -30,7 +30,7 @@  int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
 	return -ENOSYS;
 }
 
-static int kvm_init_tss(kvm_context_t kvm)
+static int kvm_init_tss(KVMState *kvm)
 {
 #ifdef KVM_CAP_SET_TSS_ADDR
 	int r;
@@ -52,7 +52,7 @@  static int kvm_init_tss(kvm_context_t kvm)
 	return 0;
 }
 
-static int kvm_create_pit(kvm_context_t kvm)
+static int kvm_create_pit(KVMState *kvm)
 {
 #ifdef KVM_CAP_PIT
 	int r;
@@ -74,7 +74,7 @@  static int kvm_create_pit(kvm_context_t kvm)
 	return 0;
 }
 
-int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+int kvm_arch_create(KVMState *kvm, unsigned long phys_mem_bytes,
  			void **vm_mem)
 {
 	int r = 0;
@@ -96,7 +96,7 @@  int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
 
 #ifdef KVM_EXIT_TPR_ACCESS
 
-static int handle_tpr_access(kvm_context_t kvm, struct kvm_run *run, int vcpu)
+static int handle_tpr_access(KVMState *kvm, struct kvm_run *run, int vcpu)
 {
 	return kvm->callbacks->tpr_access(kvm->opaque, vcpu,
 					  run->tpr_access.rip,
@@ -104,7 +104,7 @@  static int handle_tpr_access(kvm_context_t kvm, struct kvm_run *run, int vcpu)
 }
 
 
-int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic)
+int kvm_enable_vapic(KVMState *kvm, int vcpu, uint64_t vapic)
 {
 	int r;
 	struct kvm_vapic_addr va = {
@@ -122,7 +122,7 @@  int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic)
 
 #endif
 
-int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
+int kvm_arch_run(struct kvm_run *run, KVMState *kvm, int vcpu)
 {
 	int r = 0;
 
@@ -175,7 +175,7 @@  static void register_alias(int slot, uint64_t start, uint64_t len)
 	kvm_aliases[slot].len   = len;
 }
 
-int kvm_create_memory_alias(kvm_context_t kvm,
+int kvm_create_memory_alias(KVMState *kvm,
 			    uint64_t phys_start,
 			    uint64_t len,
 			    uint64_t target_phys)
@@ -205,14 +205,14 @@  int kvm_create_memory_alias(kvm_context_t kvm,
 	return 0;
 }
 
-int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
+int kvm_destroy_memory_alias(KVMState *kvm, uint64_t phys_start)
 {
 	return kvm_create_memory_alias(kvm, phys_start, 0, 0);
 }
 
 #ifdef KVM_CAP_IRQCHIP
 
-int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
+int kvm_get_lapic(KVMState *kvm, int vcpu, struct kvm_lapic_state *s)
 {
 	int r;
 	if (!kvm->irqchip_in_kernel)
@@ -225,7 +225,7 @@  int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
 	return r;
 }
 
-int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
+int kvm_set_lapic(KVMState *kvm, int vcpu, struct kvm_lapic_state *s)
 {
 	int r;
 	if (!kvm->irqchip_in_kernel)
@@ -242,7 +242,7 @@  int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
 
 #ifdef KVM_CAP_PIT
 
-int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s)
+int kvm_get_pit(KVMState *kvm, struct kvm_pit_state *s)
 {
 	int r;
 	if (!kvm->pit_in_kernel)
@@ -255,7 +255,7 @@  int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s)
 	return r;
 }
 
-int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s)
+int kvm_set_pit(KVMState *kvm, struct kvm_pit_state *s)
 {
 	int r;
 	if (!kvm->pit_in_kernel)
@@ -270,7 +270,7 @@  int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s)
 
 #endif
 
-void kvm_show_code(kvm_context_t kvm, int vcpu)
+void kvm_show_code(KVMState *kvm, int vcpu)
 {
 #define SHOW_CODE_LEN 50
 	int fd = kvm->vcpu_fd[vcpu];
@@ -314,7 +314,7 @@  void kvm_show_code(kvm_context_t kvm, int vcpu)
 /*
  * Returns available msr list.  User must free.
  */
-struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
+struct kvm_msr_list *kvm_get_msr_list(KVMState *kvm)
 {
 	struct kvm_msr_list sizer, *msrs;
 	int r, e;
@@ -339,7 +339,7 @@  struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
 	return msrs;
 }
 
-int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
+int kvm_get_msrs(KVMState *kvm, int vcpu, struct kvm_msr_entry *msrs,
 		 int n)
 {
     struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
@@ -359,7 +359,7 @@  int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
     return r;
 }
 
-int kvm_set_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
+int kvm_set_msrs(KVMState *kvm, int vcpu, struct kvm_msr_entry *msrs,
 		 int n)
 {
     struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
@@ -393,7 +393,7 @@  static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
     	fprintf(stderr, "%s %llx/%x\n", name, dt->base, dt->limit);
 }
 
-void kvm_show_regs(kvm_context_t kvm, int vcpu)
+void kvm_show_regs(KVMState *kvm, int vcpu)
 {
 	int fd = kvm->vcpu_fd[vcpu];
 	struct kvm_regs regs;
@@ -437,26 +437,26 @@  void kvm_show_regs(kvm_context_t kvm, int vcpu)
 		sregs.efer);
 }
 
-uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu)
+uint64_t kvm_get_apic_base(KVMState *kvm, int vcpu)
 {
 	struct kvm_run *run = kvm->run[vcpu];
 
 	return run->apic_base;
 }
 
-void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
+void kvm_set_cr8(KVMState *kvm, int vcpu, uint64_t cr8)
 {
 	struct kvm_run *run = kvm->run[vcpu];
 
 	run->cr8 = cr8;
 }
 
-__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu)
+__u64 kvm_get_cr8(KVMState *kvm, int vcpu)
 {
 	return kvm->run[vcpu]->cr8;
 }
 
-int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
+int kvm_setup_cpuid(KVMState *kvm, int vcpu, int nent,
 		    struct kvm_cpuid_entry *entries)
 {
 	struct kvm_cpuid *cpuid;
@@ -474,7 +474,7 @@  int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
 	return r;
 }
 
-int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
+int kvm_setup_cpuid2(KVMState *kvm, int vcpu, int nent,
 		     struct kvm_cpuid_entry2 *entries)
 {
 	struct kvm_cpuid2 *cpuid;
@@ -495,7 +495,7 @@  int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
 	return r;
 }
 
-int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
+int kvm_set_shadow_pages(KVMState *kvm, unsigned int nrshadow_pages)
 {
 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
 	int r;
@@ -514,7 +514,7 @@  int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
 	return -1;
 }
 
-int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
+int kvm_get_shadow_pages(KVMState *kvm, unsigned int *nrshadow_pages)
 {
 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
 	int r;
@@ -531,7 +531,7 @@  int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
 
 #ifdef KVM_CAP_VAPIC
 
-static int tpr_access_reporting(kvm_context_t kvm, int vcpu, int enabled)
+static int tpr_access_reporting(KVMState *kvm, int vcpu, int enabled)
 {
 	int r;
 	struct kvm_tpr_access_ctl tac = {
@@ -550,12 +550,12 @@  static int tpr_access_reporting(kvm_context_t kvm, int vcpu, int enabled)
 	return 0;
 }
 
-int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
+int kvm_enable_tpr_access_reporting(KVMState *kvm, int vcpu)
 {
 	return tpr_access_reporting(kvm, vcpu, 1);
 }
 
-int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
+int kvm_disable_tpr_access_reporting(KVMState *kvm, int vcpu)
 {
 	return tpr_access_reporting(kvm, vcpu, 0);
 }
@@ -564,7 +564,7 @@  int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
 
 #ifdef KVM_CAP_EXT_CPUID
 
-static struct kvm_cpuid2 *try_get_cpuid(kvm_context_t kvm, int max)
+static struct kvm_cpuid2 *try_get_cpuid(KVMState *kvm, int max)
 {
 	struct kvm_cpuid2 *cpuid;
 	int r, size;
@@ -599,7 +599,7 @@  static struct kvm_cpuid2 *try_get_cpuid(kvm_context_t kvm, int max)
 #define R_ESI 6
 #define R_EDI 7
 
-uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
+uint32_t kvm_get_supported_cpuid(KVMState *kvm, uint32_t function, int reg)
 {
 	struct kvm_cpuid2 *cpuid;
 	int i, max;
@@ -658,7 +658,7 @@  uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
 
 #else
 
-uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
+uint32_t kvm_get_supported_cpuid(KVMState *kvm, uint32_t function, int reg)
 {
 	return -1U;
 }
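
The MSR helpers touched above pair up in the obvious way: kvm_get_msr_list()
reports which MSRs the kernel exposes (the caller frees the list), then
kvm_get_msrs() reads them per vcpu.  A rough sketch, illustrative only; the
header name and the choice of vcpu 0 are assumptions:

#include <stdio.h>
#include <stdlib.h>
#include "libkvm-all.h"

/* Hypothetical debug helper: print every MSR the kernel advertises, for
 * vcpu 0.  struct kvm_msr_list / kvm_msr_entry come from <linux/kvm.h>. */
static void dump_vcpu0_msrs(KVMState *kvm)
{
	struct kvm_msr_list *list = kvm_get_msr_list(kvm);	/* user must free */
	struct kvm_msr_entry *msrs;
	int i, n;

	if (!list)
		return;
	n = list->nmsrs;
	msrs = calloc(n, sizeof(*msrs));
	if (!msrs) {
		free(list);
		return;
	}
	for (i = 0; i < n; i++)
		msrs[i].index = list->indices[i];
	if (kvm_get_msrs(kvm, 0, msrs, n) >= 0)
		for (i = 0; i < n; i++)
			printf("msr 0x%x = 0x%llx\n", (unsigned)msrs[i].index,
			       (unsigned long long)msrs[i].data);
	free(msrs);
	free(list);
}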
diff --git a/target-i386/libkvm.h b/target-i386/libkvm.h
index 081e010..26a4597 100644
--- a/target-i386/libkvm.h
+++ b/target-i386/libkvm.h
@@ -23,7 +23,7 @@ 
 #define PAGE_SIZE 4096ul
 #define PAGE_MASK (~(PAGE_SIZE - 1))
 
-int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr);
+int kvm_set_tss_addr(KVMState *kvm, unsigned long addr);
 
 #define smp_wmb()   asm volatile("" ::: "memory")
 
diff --git a/target-ia64/libkvm.c b/target-ia64/libkvm.c
index 48669de..bcd9750 100644
--- a/target-ia64/libkvm.c
+++ b/target-ia64/libkvm.c
@@ -33,7 +33,7 @@ 
 #include <fcntl.h>
 #include <stdlib.h>
 
-int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+int kvm_arch_create(KVMState *kvm, unsigned long phys_mem_bytes,
 			void **vm_mem)
 {
 	int r;
@@ -45,7 +45,7 @@  int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
 	return 0;
 }
 
-int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
+int kvm_arch_run(struct kvm_run *run, KVMState *kvm, int vcpu)
 {
 	int r = 0;
 
@@ -58,17 +58,17 @@  int kvm_arch_run(struct kvm_run *run,kvm_context_t kvm, int vcpu)
 	return r;
 }
 
-void kvm_show_code(kvm_context_t kvm, int vcpu)
+void kvm_show_code(KVMState *kvm, int vcpu)
 {
 	fprintf(stderr, "kvm_show_code not supported yet!\n");
 }
 
-void kvm_show_regs(kvm_context_t kvm, int vcpu)
+void kvm_show_regs(KVMState *kvm, int vcpu)
 {
 	fprintf(stderr, "kvm_show_regs not supported yet!\n");
 }
 
-int kvm_create_memory_alias(kvm_context_t kvm,
+int kvm_create_memory_alias(KVMState *kvm,
 			    uint64_t phys_start,
 			    uint64_t len,
 			    uint64_t target_phys)
@@ -76,7 +76,7 @@  int kvm_create_memory_alias(kvm_context_t kvm,
     return 0;
 }
 
-int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
+int kvm_destroy_memory_alias(KVMState *kvm, uint64_t phys_start)
 {
 	return 0;
 }
diff --git a/target-ppc/libkvm.c b/target-ppc/libkvm.c
index 2dfff3b..b5c59e8 100644
--- a/target-ppc/libkvm.c
+++ b/target-ppc/libkvm.c
@@ -23,7 +23,7 @@ 
 #include <stdio.h>
 #include <inttypes.h>
 
-int handle_dcr(struct kvm_run *run,  kvm_context_t kvm, int vcpu)
+int handle_dcr(struct kvm_run *run,  KVMState *kvm, int vcpu)
 {
 	int ret = 0;
 
@@ -39,12 +39,12 @@  int handle_dcr(struct kvm_run *run,  kvm_context_t kvm, int vcpu)
 	return ret;
 }
 
-void kvm_show_code(kvm_context_t kvm, int vcpu)
+void kvm_show_code(KVMState *kvm, int vcpu)
 {
 	fprintf(stderr, "%s: Operation not supported\n", __FUNCTION__);
 }
 
-void kvm_show_regs(kvm_context_t kvm, int vcpu)
+void kvm_show_regs(KVMState *kvm, int vcpu)
 {
 	struct kvm_regs regs;
 	int i;
@@ -72,7 +72,7 @@  void kvm_show_regs(kvm_context_t kvm, int vcpu)
 	fflush(stdout);
 }
 
-int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+int kvm_arch_create(KVMState *kvm, unsigned long phys_mem_bytes,
 			 void **vm_mem)
 {
 	int r;
@@ -84,7 +84,7 @@  int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
 	return 0;
 }
 
-int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu)
+int kvm_arch_run(struct kvm_run *run, KVMState *kvm, int vcpu)
 {
 	int ret = 0;