[v3] kvm tools, vesa: Use guest-mapped memory for framebuffer

Message ID 1307369333-11831-1-git-send-email-penberg@kernel.org (mailing list archive)
State New, archived

Commit Message

Pekka Enberg June 6, 2011, 2:08 p.m. UTC
This patch converts hw/vesa.c to use guest-mapped memory for the framebuffer
and drops the slow MMIO emulation. This speeds up framebuffer accesses
considerably. Please note that this can be optimized even further with the
KVM_GET_DIRTY_LOG ioctl(), as explained by Alexander Graf.

Cc: Alexander Graf <agraf@suse.de>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: John Floren <john@jfloren.net>
Cc: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
---
v2 -> v3: Add comment that kvm__register_mem() doesn't support overlapping
          memory regions.

 tools/kvm/hw/vesa.c         |   17 +++++------------
 tools/kvm/include/kvm/kvm.h |    3 +++
 tools/kvm/kvm.c             |   15 ++++++++++-----
 3 files changed, 18 insertions(+), 17 deletions(-)
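
The KVM_GET_DIRTY_LOG optimization mentioned above could look roughly like
the sketch below: register the framebuffer slot with KVM_MEM_LOG_DIRTY_PAGES,
then have the display refresh path poll the per-page dirty bitmap and redraw
only the pages the guest touched. This is a hypothetical illustration, not
code from the patch; vesa__update_dirty() and the slot-number plumbing are
invented for the example, while fb__write(), VESA_MEM_ADDR, and kvm->vm_fd
exist in the tree.

	#include "kvm/framebuffer.h"
	#include "kvm/kvm.h"
	#include "kvm/vesa.h"

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdlib.h>

	/*
	 * Hypothetical sketch: assumes the framebuffer slot was registered
	 * with the KVM_MEM_LOG_DIRTY_PAGES flag and that 'slot' is its
	 * slot number.
	 */
	static void vesa__update_dirty(struct kvm *kvm, u32 slot, u8 *fb, u64 size)
	{
		u64 nr_pages = size / 4096;
		struct kvm_dirty_log log;
		u8 *bitmap;
		u64 i;

		/* One dirty bit per guest page, rounded up to whole 64-bit words: */
		bitmap = calloc(1, (nr_pages + 63) / 64 * 8);
		if (!bitmap)
			return;

		log = (struct kvm_dirty_log) {
			.slot		= slot,
			.dirty_bitmap	= bitmap,
		};

		/* Fetches and clears the dirty bitmap for this memory slot: */
		if (ioctl(kvm->vm_fd, KVM_GET_DIRTY_LOG, &log) == 0) {
			for (i = 0; i < nr_pages; i++) {
				/* Byte-wise indexing of the bitmap (little-endian x86): */
				if (bitmap[i / 8] & (1 << (i % 8)))
					fb__write(VESA_MEM_ADDR + i * 4096,
						  fb + i * 4096, 4096);
			}
		}
		free(bitmap);
	}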

Comments

Sasha Levin June 6, 2011, 4:06 p.m. UTC | #1
On Mon, 2011-06-06 at 17:08 +0300, Pekka Enberg wrote:
> This patch converts hw/vesa.c to use guest-mapped memory for the framebuffer
> and drops the slow MMIO emulation. This speeds up framebuffer accesses
> considerably. Please note that this can be optimized even further with the
> KVM_GET_DIRTY_LOG ioctl(), as explained by Alexander Graf.
> 
> Cc: Alexander Graf <agraf@suse.de>
> Cc: Cyrill Gorcunov <gorcunov@gmail.com>
> Cc: Ingo Molnar <mingo@elte.hu>
> Cc: John Floren <john@jfloren.net>
> Cc: Sasha Levin <levinsasha928@gmail.com>
> Signed-off-by: Pekka Enberg <penberg@kernel.org>
> ---
> v2 -> v3: Add comment that kvm__register_mem() doesn't support overlapping
>           memory regions.
> 
>  tools/kvm/hw/vesa.c         |   17 +++++------------
>  tools/kvm/include/kvm/kvm.h |    3 +++
>  tools/kvm/kvm.c             |   15 ++++++++++-----
>  3 files changed, 18 insertions(+), 17 deletions(-)
> 
> diff --git a/tools/kvm/hw/vesa.c b/tools/kvm/hw/vesa.c
> index 48d31ce..71322fc 100644
> --- a/tools/kvm/hw/vesa.c
> +++ b/tools/kvm/hw/vesa.c
> @@ -8,6 +8,7 @@
>  #include "kvm/irq.h"
>  #include "kvm/kvm.h"
>  #include "kvm/pci.h"
> +#include <sys/mman.h>
>  
>  #include <sys/types.h>
>  #include <sys/ioctl.h>
> @@ -40,14 +41,6 @@ static struct pci_device_header vesa_pci_device = {
>  	.bar[1]			= VESA_MEM_ADDR | PCI_BASE_ADDRESS_SPACE_MEMORY,
>  };
>  
> -static void vesa_mmio_callback(u64 addr, u8 *data, u32 len, u8 is_write)
> -{
> -	if (!is_write)
> -		return;
> -
> -	fb__write(addr, data, len);
> -}
> -
>  static struct framebuffer vesafb;
>  
>  struct framebuffer *vesa__init(struct kvm *kvm)
> @@ -65,12 +58,12 @@ struct framebuffer *vesa__init(struct kvm *kvm)
>  	vesa_pci_device.bar[0]		= vesa_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
>  	pci__register(&vesa_pci_device, dev);
>  
> -	kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
> -
> -	mem = calloc(1, VESA_MEM_SIZE);
> -	if (!mem)
> +	mem = mmap(NULL, VESA_MEM_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
> +	if (mem == MAP_FAILED)
>  		return NULL;
>  
> +	kvm__register_mem(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, mem);
> +
>  	vesafb = (struct framebuffer) {
>  		.width			= VESA_WIDTH,
>  		.height			= VESA_HEIGHT,
> diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
> index 55551de..17b7557 100644
> --- a/tools/kvm/include/kvm/kvm.h
> +++ b/tools/kvm/include/kvm/kvm.h
> @@ -21,6 +21,8 @@ struct kvm {
>  
>  	int			nrcpus;		/* Number of cpus to run */
>  
> +	u32			mem_slots;	/* for KVM_SET_USER_MEMORY_REGION */
> +
>  	u64			ram_size;
>  	void			*ram_start;
>  
> @@ -49,6 +51,7 @@ void kvm__stop_timer(struct kvm *kvm);
>  void kvm__irq_line(struct kvm *kvm, int irq, int level);
>  bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
>  bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
> +void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr);
>  bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
>  bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
>  void kvm__pause(void);
> diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
> index 54e3203..2c56a79 100644
> --- a/tools/kvm/kvm.c
> +++ b/tools/kvm/kvm.c
> @@ -162,13 +162,18 @@ static bool kvm__cpu_supports_vm(void)
>  	return regs.ecx & (1 << feature);
>  }
>  
> -static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
> +/*
> + * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
> + * memory regions to it. Therefore, be careful if you use this function for
> + * registering memory regions for emulating hardware.
> + */
> +void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
>  {
>  	struct kvm_userspace_memory_region mem;
>  	int ret;
>  
>  	mem = (struct kvm_userspace_memory_region) {
> -		.slot			= slot,
> +		.slot			= kvm->mem_slots++,
>  		.guest_phys_addr	= guest_phys,
>  		.memory_size		= size,
>  		.userspace_addr		= (unsigned long)userspace_addr,
> @@ -200,7 +205,7 @@ void kvm__init_ram(struct kvm *kvm)
>  		phys_size  = kvm->ram_size;
>  		host_mem   = kvm->ram_start;
>  
> -		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
> +		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
>  	} else {
>  		/* First RAM range from zero to the PCI gap: */
>  
> @@ -208,7 +213,7 @@ void kvm__init_ram(struct kvm *kvm)
>  		phys_size  = KVM_32BIT_GAP_START;
>  		host_mem   = kvm->ram_start;
>  
> -		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
> +		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
>  
>  		/* Second RAM range from 4GB to the end of RAM: */
>  
> @@ -216,7 +221,7 @@ void kvm__init_ram(struct kvm *kvm)
>  		phys_size  = kvm->ram_size - phys_size;
>  		host_mem   = kvm->ram_start + phys_start;
>  
> -		kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
> +		kvm__register_mem(kvm, phys_start, phys_size, host_mem);

Currently, VESA maps itself to 0xd0000000, which means we'll have an
overlap once we're using more than 4GB of RAM.

Maybe we should increase our PCI gap size (which currently starts at
0xf0000000) to include that memory? This way we could prevent an overlap
if we use a large amount of RAM, and the extra gap won't really hurt
anything if we don't use VESA.

>  	}
>  }
>
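
A back-of-the-envelope sketch of the overlap Sasha describes, using the
constants in the tree (illustrative, not from the thread):

	/*
	 * First RAM slot from kvm__init_ram(): [0, min(ram_size, KVM_32BIT_GAP_START))
	 *                                    = at most [0, 0xf0000000)
	 * VESA framebuffer slot:               [VESA_MEM_ADDR, VESA_MEM_ADDR + VESA_MEM_SIZE)
	 *                                    = [0xd0000000, ...)
	 *
	 * Once guest RAM extends past 0xd0000000, the two slots overlap, and
	 * KVM_SET_USER_MEMORY_REGION will reject the registration.
	 */
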
Pekka Enberg June 6, 2011, 4:22 p.m. UTC | #2
On Mon, Jun 6, 2011 at 7:06 PM, Sasha Levin <levinsasha928@gmail.com> wrote:
> Currently, VESA maps itself to 0xd0000000, which means we'll have an
> overlap once we're using more than 4GB of RAM.
>
> Maybe we should increase our PCI gap size (which currently starts at
> 0xf0000000) to include that memory? This way we could prevent an overlap
> if we use a large amount of RAM, and the extra gap won't really hurt
> anything if we don't use VESA.

Yup, I sent a v4 that does that. I guess we'll need to do this properly
as soon as the next driver that needs more hacks comes along ;-).

                         Pekka

Patch

diff --git a/tools/kvm/hw/vesa.c b/tools/kvm/hw/vesa.c
index 48d31ce..71322fc 100644
--- a/tools/kvm/hw/vesa.c
+++ b/tools/kvm/hw/vesa.c
@@ -8,6 +8,7 @@ 
 #include "kvm/irq.h"
 #include "kvm/kvm.h"
 #include "kvm/pci.h"
+#include <sys/mman.h>
 
 #include <sys/types.h>
 #include <sys/ioctl.h>
@@ -40,14 +41,6 @@  static struct pci_device_header vesa_pci_device = {
 	.bar[1]			= VESA_MEM_ADDR | PCI_BASE_ADDRESS_SPACE_MEMORY,
 };
 
-static void vesa_mmio_callback(u64 addr, u8 *data, u32 len, u8 is_write)
-{
-	if (!is_write)
-		return;
-
-	fb__write(addr, data, len);
-}
-
 static struct framebuffer vesafb;
 
 struct framebuffer *vesa__init(struct kvm *kvm)
@@ -65,12 +58,12 @@  struct framebuffer *vesa__init(struct kvm *kvm)
 	vesa_pci_device.bar[0]		= vesa_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
 	pci__register(&vesa_pci_device, dev);
 
-	kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
-
-	mem = calloc(1, VESA_MEM_SIZE);
-	if (!mem)
+	mem = mmap(NULL, VESA_MEM_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
+	if (mem == MAP_FAILED)
 		return NULL;
 
+	kvm__register_mem(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, mem);
+
 	vesafb = (struct framebuffer) {
 		.width			= VESA_WIDTH,
 		.height			= VESA_HEIGHT,
diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
index 55551de..17b7557 100644
--- a/tools/kvm/include/kvm/kvm.h
+++ b/tools/kvm/include/kvm/kvm.h
@@ -21,6 +21,8 @@  struct kvm {
 
 	int			nrcpus;		/* Number of cpus to run */
 
+	u32			mem_slots;	/* for KVM_SET_USER_MEMORY_REGION */
+
 	u64			ram_size;
 	void			*ram_start;
 
@@ -49,6 +51,7 @@  void kvm__stop_timer(struct kvm *kvm);
 void kvm__irq_line(struct kvm *kvm, int irq, int level);
 bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
 bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
+void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr);
 bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
 bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
 void kvm__pause(void);
diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
index 54e3203..2c56a79 100644
--- a/tools/kvm/kvm.c
+++ b/tools/kvm/kvm.c
@@ -162,13 +162,18 @@  static bool kvm__cpu_supports_vm(void)
 	return regs.ecx & (1 << feature);
 }
 
-static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
+/*
+ * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
+ * memory regions to it. Therefore, be careful if you use this function for
+ * registering memory regions for emulating hardware.
+ */
+void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
 {
 	struct kvm_userspace_memory_region mem;
 	int ret;
 
 	mem = (struct kvm_userspace_memory_region) {
-		.slot			= slot,
+		.slot			= kvm->mem_slots++,
 		.guest_phys_addr	= guest_phys,
 		.memory_size		= size,
 		.userspace_addr		= (unsigned long)userspace_addr,
@@ -200,7 +205,7 @@  void kvm__init_ram(struct kvm *kvm)
 		phys_size  = kvm->ram_size;
 		host_mem   = kvm->ram_start;
 
-		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
+		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
 	} else {
 		/* First RAM range from zero to the PCI gap: */
 
@@ -208,7 +213,7 @@  void kvm__init_ram(struct kvm *kvm)
 		phys_size  = KVM_32BIT_GAP_START;
 		host_mem   = kvm->ram_start;
 
-		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
+		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
 
 		/* Second RAM range from 4GB to the end of RAM: */
 
@@ -216,7 +221,7 @@  void kvm__init_ram(struct kvm *kvm)
 		phys_size  = kvm->ram_size - phys_size;
 		host_mem   = kvm->ram_start + phys_start;
 
-		kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
+		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
 	}
 }
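
For reference, PROT_RW and MAP_ANON_NORESERVE above are kvm tools
convenience macros rather than standard mmap flags. A minimal standalone
sketch of the framebuffer allocation, assuming they expand to the usual
flag combinations (the VESA_MEM_SIZE value below is illustrative, not the
tree's):

	#include <sys/mman.h>
	#include <stdio.h>

	#define VESA_MEM_SIZE	(2 * 1024 * 1024)	/* illustrative; the real value lives in vesa.h */

	int main(void)
	{
		/* Presumed expansion: PROT_RW = PROT_READ | PROT_WRITE,
		 * MAP_ANON_NORESERVE = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE. */
		void *mem = mmap(NULL, VESA_MEM_SIZE, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
		if (mem == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* MAP_NORESERVE skips swap-space reservation: pages are backed
		 * lazily on first touch, so an untouched framebuffer costs
		 * almost nothing. */
		munmap(mem, VESA_MEM_SIZE);
		return 0;
	}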