
[v2,06/11] KVM: selftests: dirty_log_test: Remove create_vm

Message ID 20201111122636.73346-7-drjones@redhat.com (mailing list archive)
State New, archived
Series KVM: selftests: Cleanups, take 2

Commit Message

Andrew Jones Nov. 11, 2020, 12:26 p.m. UTC
Use vm_create_with_vcpus instead of create_vm and do
some minor cleanups around it.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 tools/testing/selftests/kvm/dirty_log_test.c | 56 ++++++--------------
 1 file changed, 16 insertions(+), 40 deletions(-)
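
For orientation, the conversion boils down to the following sketch. It is
lifted from the hunks in the patch below rather than from an authoritative
vm_create_with_vcpus() prototype, so treat the argument meanings as inferred
from this diff:

    /*
     * The removed create_vm() helper open-coded vm_create(),
     * kvm_vm_elf_load(), vm_create_irqchip() (x86 only) and
     * vm_vcpu_add_default(), plus a hand-rolled page-table estimate
     * (extra_mem_pages / 512 * 2).  The single library call below
     * replaces all of that: it sizes the extra memory for twice the
     * 1G+ dirty test range and adds the one vCPU (VCPU_ID) in one go.
     */
    vm = vm_create_with_vcpus(mode, 1,
                vm_calc_num_guest_pages(mode, DIRTY_MEM_SIZE * 2),
                0, guest_code, (uint32_t []){ VCPU_ID });
    log_mode_create_vm_done(vm);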

Comments

Ben Gardon Nov. 11, 2020, 10:46 p.m. UTC | #1
On Wed, Nov 11, 2020 at 4:27 AM Andrew Jones <drjones@redhat.com> wrote:
>
> Use vm_create_with_vcpus instead of create_vm and do
> some minor cleanups around it.
>
> Signed-off-by: Andrew Jones <drjones@redhat.com>

Reviewed-by: Ben Gardon <bgardon@google.com>

Peter Xu Nov. 12, 2020, 6:20 p.m. UTC | #2
On Wed, Nov 11, 2020 at 01:26:31PM +0100, Andrew Jones wrote:
> +#define DIRTY_MEM_BITS			30 /* 1G */
> +#define DIRTY_MEM_SIZE			(1UL << 30)

Nit: could do s/30/DIRTY_MEM_BITS/
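
In other words, a sketch of the suggested form (the defines stay adjacent
as in this patch):

    #define DIRTY_MEM_BITS		30 /* 1G */
    #define DIRTY_MEM_SIZE		(1UL << DIRTY_MEM_BITS) /* derived, so the two cannot drift apart */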

Reviewed-by: Peter Xu <peterx@redhat.com>
Paolo Bonzini Nov. 13, 2020, 4:42 p.m. UTC | #3
On 11/11/20 13:26, Andrew Jones wrote:
> Use vm_create_with_vcpus instead of create_vm and do
> some minor cleanups around it.
> 
> Signed-off-by: Andrew Jones <drjones@redhat.com>

This one (even after fixing conflicts) breaks the dirty ring test.

Paolo
Andrew Jones Nov. 16, 2020, 12:16 p.m. UTC | #4
On Fri, Nov 13, 2020 at 05:42:01PM +0100, Paolo Bonzini wrote:
> > 
> 
> This one (even after fixing conflicts) breaks the dirty ring test.
>

Maybe the problem was that patch 3/11 was missing? For me, after rebasing
3/11, this patch applied cleanly and worked. The only change I made was to
address Peter's nit.

Thanks,
drew
Paolo Bonzini Nov. 16, 2020, 12:24 p.m. UTC | #5
On 16/11/20 13:16, Andrew Jones wrote:
>> This one (even after fixing conflicts) breaks the dirty ring test.
>>
> Maybe the problem was that patch 3/11 was missing? For me, after rebasing
> 3/11, this patch applied cleanly and worked. The only change I made was to
> address Peter's nit.

Yes, the conflicts in patch 3 were a bit too large so I dropped it.  It 
wasn't clear from the commit message that it was required later on.

Paolo

Patch

diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 1b7375d2acea..2e0dcd453ef0 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -5,8 +5,6 @@ 
  * Copyright (C) 2018, Red Hat, Inc.
  */
 
-#define _GNU_SOURCE /* for program_invocation_name */
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <pthread.h>
@@ -20,6 +18,9 @@ 
 
 #define VCPU_ID				1
 
+#define DIRTY_MEM_BITS			30 /* 1G */
+#define DIRTY_MEM_SIZE			(1UL << 30)
+
 /* The memory slot index to track dirty pages */
 #define TEST_MEM_SLOT_INDEX		1
 
@@ -353,27 +354,6 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
 	}
 }
 
-static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
-				uint64_t extra_mem_pages, void *guest_code)
-{
-	struct kvm_vm *vm;
-	uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
-
-	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
-	vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
-	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
-#ifdef __x86_64__
-	vm_create_irqchip(vm);
-#endif
-	log_mode_create_vm_done(vm);
-	vm_vcpu_add_default(vm, vcpuid, guest_code);
-	return vm;
-}
-
-#define DIRTY_MEM_BITS 30 /* 1G */
-#define PAGE_SHIFT_4K  12
-
 struct test_params {
 	unsigned long iterations;
 	unsigned long interval;
@@ -393,43 +373,39 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		return;
 	}
 
+	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
 	/*
 	 * We reserve page table for 2 times of extra dirty mem which
-	 * will definitely cover the original (1G+) test range.  Here
-	 * we do the calculation with 4K page size which is the
-	 * smallest so the page number will be enough for all archs
-	 * (e.g., 64K page size guest will need even less memory for
-	 * page tables).
+	 * will definitely cover the original (1G+) test range.
 	 */
-	vm = create_vm(mode, VCPU_ID,
-		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
-		       guest_code);
+	vm = vm_create_with_vcpus(mode, 1,
+			vm_calc_num_guest_pages(mode, DIRTY_MEM_SIZE * 2),
+			0, guest_code, (uint32_t []){ VCPU_ID });
+
+	log_mode_create_vm_done(vm);
 
 	guest_page_size = vm_get_page_size(vm);
+	host_page_size = getpagesize();
+
 	/*
 	 * A little more than 1G of guest page sized pages.  Cover the
 	 * case where the size is not aligned to 64 pages.
 	 */
-	guest_num_pages = (1ul << (DIRTY_MEM_BITS -
-				   vm_get_page_shift(vm))) + 3;
-	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
-
-	host_page_size = getpagesize();
+	guest_num_pages = vm_adjust_num_guest_pages(mode,
+				(1ul << (DIRTY_MEM_BITS - vm_get_page_shift(vm))) + 3);
 	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 
 	if (!p->phys_offset) {
-		guest_test_phys_mem = (vm_get_max_gfn(vm) -
-				       guest_num_pages) * guest_page_size;
+		guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) * guest_page_size;
 		guest_test_phys_mem &= ~(host_page_size - 1);
 	} else {
 		guest_test_phys_mem = p->phys_offset;
 	}
-
 #ifdef __s390x__
 	/* Align to 1M (segment size) */
 	guest_test_phys_mem &= ~((1 << 20) - 1);
 #endif
-
 	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
 
 	bmap = bitmap_alloc(host_num_pages);