
[5/8] KVM: selftests: Introduce vm_create_[default_]_with_vcpus

Message ID 20201110204802.417521-6-drjones@redhat.com (mailing list archive)
State New, archived
Series: KVM: selftests: Cleanups, take 2

Commit Message

Andrew Jones Nov. 10, 2020, 8:47 p.m. UTC
Introduce new vm_create variants that also take a number of vcpus, a
number of per-vcpu pages, and optionally a list of vcpuids. These
variants will create default VMs with enough additional pages to
cover the vcpu stacks, per-vcpu pages, and page table pages for all
of them. The new 'default' variant uses VM_MODE_DEFAULT, whereas the
other new variant accepts the mode as a parameter.

Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 .../testing/selftests/kvm/include/kvm_util.h  | 10 ++++++
 tools/testing/selftests/kvm/lib/kvm_util.c    | 35 ++++++++++++++++---
 2 files changed, 40 insertions(+), 5 deletions(-)
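
For a sense of the resulting API, a minimal caller might look like
this (a sketch only, not part of the patch; NR_VCPUS and the
guest_code body are made up, and GUEST_DONE() assumes ucall support
is set up for the target architecture):

 #include "kvm_util.h"

 #define NR_VCPUS 4

 static void guest_code(void)
 {
         GUEST_DONE();
 }

 int main(void)
 {
         struct kvm_vm *vm;

         /* Four vCPUs, no extra memslot-0 pages, no per-vcpu pages;
          * a NULL vcpuids list yields default ids 0..3.
          */
         vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code,
                                           NULL);
         kvm_vm_free(vm);
         return 0;
 }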

Comments

Ben Gardon Nov. 10, 2020, 10:13 p.m. UTC | #1
On Tue, Nov 10, 2020 at 12:48 PM Andrew Jones <drjones@redhat.com> wrote:
>
> Introduce new vm_create variants that also take a number of vcpus, a
> number of per-vcpu pages, and optionally a list of vcpuids. These
> variants will create default VMs with enough additional pages to
> cover the vcpu stacks, per-vcpu pages, and page table pages for all
> of them. The new 'default' variant uses VM_MODE_DEFAULT, whereas the
> other new variant accepts the mode as a parameter.
>
> Reviewed-by: Peter Xu <peterx@redhat.com>
> Reviewed-by: Ben Gardon <bgardon@google.com>
> Signed-off-by: Andrew Jones <drjones@redhat.com>
> ---
>  .../testing/selftests/kvm/include/kvm_util.h  | 10 ++++++
>  tools/testing/selftests/kvm/lib/kvm_util.c    | 35 ++++++++++++++++---
>  2 files changed, 40 insertions(+), 5 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
> index 48b48a0014e2..bc8db80309f5 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util.h
> @@ -261,6 +261,16 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
>  struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
>                                  void *guest_code);
>
> +/* Same as vm_create_default, but can be used for more than one vcpu */
> +struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
> +                                           uint32_t num_percpu_pages, void *guest_code,
> +                                           uint32_t vcpuids[]);
> +
> +/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
> +struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
> +                                   uint64_t extra_mem_pages, uint32_t num_percpu_pages,
> +                                   void *guest_code, uint32_t vcpuids[]);
> +
>  /*
>   * Adds a vCPU with reasonable defaults (e.g. a stack)
>   *
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index a7e28e33fc3b..b31a4e988a5d 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -272,8 +272,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
>         return vm;
>  }
>
> -struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
> -                                void *guest_code)
> +struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
> +                                   uint64_t extra_mem_pages, uint32_t num_percpu_pages,
> +                                   void *guest_code, uint32_t vcpuids[])
>  {
>         /* The maximum page table size for a memory region will be when the
>          * smallest pages are used. Considering each page contains x page
> @@ -281,10 +282,18 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
>          * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
>          * than N/x*2.
>          */
> -       uint64_t extra_pg_pages = (extra_mem_pages / PTES_PER_MIN_PAGE) * 2;
> +       uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
> +       uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
> +       uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
>         struct kvm_vm *vm;
> +       int i;
> +
> +       TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
> +                   "nr_vcpus = %d too large for host, max-vcpus = %d",
> +                   nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
>
> -       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
> +       pages = vm_adjust_num_guest_pages(mode, pages);
> +       vm = vm_create(mode, pages, O_RDWR);

I think this will substantially change the behavior of this function
to create a much larger memslot 0. In the existing code, the memslot
created in vm_create is just sized large enough for the stacks and
page tables. Another memslot is then created for the memory under
test.

I think separating the memslots is a good arrangement because it
limits the extent to which kernel bugs could screw up the test and
makes it easier to debug if you're testing something like dirty
logging. It's also useful if you wanted to back the memslot under test
with a different kind of memory from memslot 0. e.g. memslot 0 could
use anonymous pages and the slot(s) under test could use hugetlbfs.
You might also want multiple memslots to assign them to different NUMA
nodes.

Is that change intentional? I would suggest not adding vcpu_pages to
the calculation for pages above, similar to what it was before:
uint64_t pages = DEFAULT_GUEST_PHY_PAGES + extra_pg_pages;

> 
> [...]
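
The split Ben describes, with memslot 0 kept small and the memory
under test in its own slot, looks roughly like this in a caller (a
sketch only; the slot index, guest_test_phys_mem, and guest_num_pages
are placeholders, and vm_userspace_mem_region_add() is the existing
kvm_util helper):

 /* Memslot 0, created inside vm_create_with_vcpus(), holds only the
  * guest stacks and page tables; guest_num_pages is passed as
  * extra_mem_pages so the page table estimate covers it.
  */
 vm = vm_create_with_vcpus(mode, nr_vcpus, guest_num_pages, 0,
                           guest_code, NULL);

 /* The memory under test gets its own memslot, which can also use a
  * different backing type, e.g. hugetlbfs instead of anonymous pages.
  */
 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
                             guest_test_phys_mem, 1 /* slot */,
                             guest_num_pages, 0);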
Andrew Jones Nov. 11, 2020, 8:29 a.m. UTC | #2
On Tue, Nov 10, 2020 at 02:13:14PM -0800, Ben Gardon wrote:
> On Tue, Nov 10, 2020 at 12:48 PM Andrew Jones <drjones@redhat.com> wrote:
> >
> > [...]
> > -       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
> > +       pages = vm_adjust_num_guest_pages(mode, pages);
> > +       vm = vm_create(mode, pages, O_RDWR);
> 
> I think this will substantially change the behavior of this function
> to create a much larger memslot 0. In the existing code, the memslot
> created in vm_create is just sized large enough for the stacks and
> page tables. Another memslot is then created for the memory under
> test.
> 
> I think separating the memslots is a good arrangement because it
> limits the extent to which kernel bugs could screw up the test and
> makes it easier to debug if you're testing something like dirty
> logging. It's also useful if you wanted to back the memslot under test
> with a different kind of memory from memslot 0. e.g. memslot 0 could
> use anonymous pages and the slot(s) under test could use hugetlbfs.
> You might also want multiple memslots to assign them to different NUMA
> nodes.
> 
> Is that change intentional? I would suggest not adding vcpu_pages to
> the calculation for pages above, similar to what it was before:
> uint64_t pages = DEFAULT_GUEST_PHY_PAGES + extra_pg_pages;

It was definitely intentional to create an API that allows us to
allocate per-vcpu pages for the VM created with its vcpus. But, to do
what you want, we just need to change the

 vm = vm_create_with_vcpus(mode, vcpus, 0,
                           vcpu_memory_bytes / perf_test_args.guest_page_size,
                           guest_code, NULL);

line in perf_test_util.h in the "KVM: selftests: Use vm_create_with_vcpus
in create_vm" patch to


 vm = vm_create_with_vcpus(mode, vcpus,
                           (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
                           0, guest_code, NULL);


Notice how that moves the per-vcpu pages to extra_mem_pages, which is
only used to calculate page table pages.

I'll do that in v2.

Thanks,
drew
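
The bound used in the code comment's estimate is a geometric series:
with x PTEs fitting in one page-table page, mapping N pages requires

 N/x + N/x^2 + N/x^3 + ... = N/(x-1)

pages of page tables, and since x >= 2 in every supported mode,
N/(x-1) <= 2N/x, which is the N/x*2 upper bound used for
extra_pg_pages.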

Patch

diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 48b48a0014e2..bc8db80309f5 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -261,6 +261,16 @@  vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
 				 void *guest_code);
 
+/* Same as vm_create_default, but can be used for more than one vcpu */
+struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
+					    uint32_t num_percpu_pages, void *guest_code,
+					    uint32_t vcpuids[]);
+
+/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
+struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+				    uint64_t extra_mem_pages, uint32_t num_percpu_pages,
+				    void *guest_code, uint32_t vcpuids[]);
+
 /*
  * Adds a vCPU with reasonable defaults (e.g. a stack)
  *
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index a7e28e33fc3b..b31a4e988a5d 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -272,8 +272,9 @@  struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 	return vm;
 }
 
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
-				 void *guest_code)
+struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+				    uint64_t extra_mem_pages, uint32_t num_percpu_pages,
+				    void *guest_code, uint32_t vcpuids[])
 {
 	/* The maximum page table size for a memory region will be when the
 	 * smallest pages are used. Considering each page contains x page
@@ -281,10 +282,18 @@  struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
 	 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
 	 * than N/x*2.
 	 */
-	uint64_t extra_pg_pages = (extra_mem_pages / PTES_PER_MIN_PAGE) * 2;
+	uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+	uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+	uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
 	struct kvm_vm *vm;
+	int i;
+
+	TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
+		    "nr_vcpus = %d too large for host, max-vcpus = %d",
+		    nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
 
-	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+	pages = vm_adjust_num_guest_pages(mode, pages);
+	vm = vm_create(mode, pages, O_RDWR);
 
 	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
 
@@ -292,11 +301,27 @@  struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
 	vm_create_irqchip(vm);
 #endif
 
-	vm_vcpu_add_default(vm, vcpuid, guest_code);
+	for (i = 0; i < nr_vcpus; ++i)
+		vm_vcpu_add_default(vm, vcpuids ? vcpuids[i] : i, guest_code);
 
 	return vm;
 }
 
+struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
+					    uint32_t num_percpu_pages, void *guest_code,
+					    uint32_t vcpuids[])
+{
+	return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
+				    num_percpu_pages, guest_code, vcpuids);
+}
+
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
+				 void *guest_code)
+{
+	return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
+					    (uint32_t []){ vcpuid });
+}
+
 /*
  * VM Restart
  *
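
A note on the final hunk: vm_create_default() keeps its original
signature and forwards its single vcpuid through a C99 compound
literal, (uint32_t []){ vcpuid }, so existing callers compile
unchanged. For example (the vcpuid value and guest_code are
placeholders):

 /* Still one vCPU, id 5, now routed through vm_create_with_vcpus(). */
 vm = vm_create_default(5, 0, guest_code);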