
[v2,1/3] KVM: selftests: Create source of randomness for guest code.

Message ID 20220817214146.3285106-2-coltonlewis@google.com (mailing list archive)
State New, archived
Series Randomize memory access of dirty_log_perf_test

Commit Message

Colton Lewis Aug. 17, 2022, 9:41 p.m. UTC
Create for each vcpu an array of random numbers to be used as a source
of randomness when executing guest code. Randomness should be useful
for approaching more realistic conditions in dirty_log_perf_test.

Create a -r argument to specify a random seed. If no argument
is provided, the seed defaults to the current Unix timestamp.

The random arrays are allocated and filled as part of VM creation. The
additional memory used is one 32-bit number per guest VM page to
ensure one number for each iteration of the inner loop of guest_code.

Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
 .../selftests/kvm/dirty_log_perf_test.c       | 11 ++++-
 .../selftests/kvm/include/perf_test_util.h    |  3 ++
 .../selftests/kvm/lib/perf_test_util.c        | 45 ++++++++++++++++++-
 3 files changed, 55 insertions(+), 4 deletions(-)

Comments

David Matlack Aug. 26, 2022, 9:58 p.m. UTC | #1
On Wed, Aug 17, 2022 at 09:41:44PM +0000, Colton Lewis wrote:
> Create for each vcpu an array of random numbers to be used as a source
> of randomness when executing guest code. Randomness should be useful
> for approaching more realistic conditions in dirty_log_perf_test.
> 
> Create a -r argument to specify a random seed. If no argument
> is provided, the seed defaults to the current Unix timestamp.
> 
> The random arrays are allocated and filled as part of VM creation. The
> additional memory used is one 32-bit number per guest VM page to
> ensure one number for each iteration of the inner loop of guest_code.

nit: I find it helpful to put this in more practical terms as well. e.g.

  The random arrays are allocated and filled as part of VM creation. The
  additional memory used is one 32-bit number per guest VM page (so
  1MiB of additional memory when using the default 1GiB per-vCPU region
  size) to ensure one number for each iteration of the inner loop of
  guest_code.
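
(With the default 4KiB guest page size, that works out to 1GiB / 4KiB =
262,144 entries x 4 bytes = 1MiB per vCPU.)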

Speaking of, this commit always allocates the random arrays, even if
they are not used. I think that's probably fine but it deserves to be
called out in the commit description with an explanation of why it is
the way it is.

> 
> Signed-off-by: Colton Lewis <coltonlewis@google.com>
> ---
>  .../selftests/kvm/dirty_log_perf_test.c       | 11 ++++-
>  .../selftests/kvm/include/perf_test_util.h    |  3 ++
>  .../selftests/kvm/lib/perf_test_util.c        | 45 ++++++++++++++++++-
>  3 files changed, 55 insertions(+), 4 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> index f99e39a672d3..362b946019e9 100644
> --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
> +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> @@ -132,6 +132,7 @@ struct test_params {
>  	bool partition_vcpu_memory_access;
>  	enum vm_mem_backing_src_type backing_src;
>  	int slots;
> +	uint32_t random_seed;
>  };
>  
>  static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
> @@ -225,6 +226,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>  				 p->slots, p->backing_src,
>  				 p->partition_vcpu_memory_access);
>  
> +	perf_test_set_random_seed(vm, p->random_seed);
>  	perf_test_set_wr_fract(vm, p->wr_fract);
>  
>  	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
> @@ -352,7 +354,7 @@ static void help(char *name)
>  {
>  	puts("");
>  	printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
> -	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
> +	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed ] [-s mem type]"
>  	       "[-x memslots]\n", name);
>  	puts("");
>  	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
> @@ -380,6 +382,7 @@ static void help(char *name)
>  	printf(" -v: specify the number of vCPUs to run.\n");
>  	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
>  	       "     them into a separate region of memory for each vCPU.\n");
> +	printf(" -r: specify the starting random seed.\n");
>  	backing_src_help("-s");
>  	printf(" -x: Split the memory region into this number of memslots.\n"
>  	       "     (default: 1)\n");
> @@ -396,6 +399,7 @@ int main(int argc, char *argv[])
>  		.partition_vcpu_memory_access = true,
>  		.backing_src = DEFAULT_VM_MEM_SRC,
>  		.slots = 1,
> +		.random_seed = time(NULL),
>  	};
>  	int opt;
>  
> @@ -406,7 +410,7 @@ int main(int argc, char *argv[])
>  
>  	guest_modes_append_default();
>  
> -	while ((opt = getopt(argc, argv, "eghi:p:m:nb:f:v:os:x:")) != -1) {
> +	while ((opt = getopt(argc, argv, "eghi:p:m:nb:f:v:or:s:x:")) != -1) {
>  		switch (opt) {
>  		case 'e':
>  			/* 'e' is for evil. */
> @@ -442,6 +446,9 @@ int main(int argc, char *argv[])
>  		case 'o':
>  			p.partition_vcpu_memory_access = false;
>  			break;
> +		case 'r':
> +			p.random_seed = atoi(optarg);

Putting the random seed in test_params seems unnecessary. This flag
could just set perf_test_args.random_seed directly. Doing this would
avoid the need for duplicating the time(NULL) initialization, and allow
you to drop perf_test_set_random_seed().
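
A rough sketch of that (illustrative only):

	case 'r':
		perf_test_args.random_seed = atoi(optarg);
		break;

with perf_test_util.c owning the time(NULL) default (though, per the
comment further down, that default must not clobber a seed the test has
already set).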

> +			break;
>  		case 's':
>  			p.backing_src = parse_backing_src_type(optarg);
>  			break;
> diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
> index eaa88df0555a..9517956424fc 100644
> --- a/tools/testing/selftests/kvm/include/perf_test_util.h
> +++ b/tools/testing/selftests/kvm/include/perf_test_util.h
> @@ -23,6 +23,7 @@ struct perf_test_vcpu_args {
>  	uint64_t gpa;
>  	uint64_t gva;
>  	uint64_t pages;
> +	vm_vaddr_t random_array;
>  
>  	/* Only used by the host userspace part of the vCPU thread */
>  	struct kvm_vcpu *vcpu;
> @@ -35,6 +36,7 @@ struct perf_test_args {
>  	uint64_t gpa;
>  	uint64_t size;
>  	uint64_t guest_page_size;
> +	uint32_t random_seed;
>  	int wr_fract;
>  
>  	/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
> @@ -52,6 +54,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
>  void perf_test_destroy_vm(struct kvm_vm *vm);
>  
>  void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
> +void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
>  
>  void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
>  void perf_test_join_vcpu_threads(int vcpus);
> diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
> index 9618b37c66f7..8d85923acb4e 100644
> --- a/tools/testing/selftests/kvm/lib/perf_test_util.c
> +++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
> @@ -70,6 +70,35 @@ void perf_test_guest_code(uint32_t vcpu_idx)
>  	}
>  }
>  
> +void perf_test_alloc_random_arrays(uint32_t nr_vcpus, uint32_t nr_randoms)
> +{
> +	struct perf_test_args *pta = &perf_test_args;
> +
> +	for (uint32_t i = 0; i < nr_vcpus; i++) {
> +		pta->vcpu_args[i].random_array = vm_vaddr_alloc(
> +			pta->vm,
> +			nr_randoms * sizeof(uint32_t),
> +			KVM_UTIL_MIN_VADDR);

> +		pr_debug("Random row %d vaddr: %p.\n", i, (void *)pta->vcpu_args[i].random_array);
> +	}
> +}
> +
> +void perf_test_fill_random_arrays(uint32_t nr_vcpus, uint32_t nr_randoms)
> +{
> +	struct perf_test_args *pta = &perf_test_args;
> +	uint32_t *host_row;

"host_row" is a confusing variable name here since you are no longer
operating on a 2D array. Each vCPU has its own array.

> +
> +	for (uint32_t i = 0; i < nr_vcpus; i++) {
> +		host_row = addr_gva2hva(pta->vm, pta->vcpu_args[i].random_array);
> +
> +		for (uint32_t j = 0; j < nr_randoms; j++)
> +			host_row[j] = random();
> +
> +		pr_debug("New randoms row %d: %d, %d, %d...\n",
> +			 i, host_row[0], host_row[1], host_row[2]);
> +	}
> +}
> +
>  void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
>  			   struct kvm_vcpu *vcpus[],
>  			   uint64_t vcpu_memory_bytes,
> @@ -120,8 +149,9 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
>  
>  	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
>  
> -	/* By default vCPUs will write to memory. */
> -	pta->wr_fract = 1;
> +	/* Set perf_test_args defaults. */
> +	pta->wr_fract = 100;
> +	pta->random_seed = time(NULL);

Won't this overwrite the random_seed provided by the test?

>  
>  	/*
>  	 * Snapshot the non-huge page size.  This is used by the guest code to
> @@ -211,6 +241,11 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
>  
>  	ucall_init(vm, NULL);
>  
> +	srandom(perf_test_args.random_seed);
> +	pr_info("Random seed: %d\n", perf_test_args.random_seed);
> +	perf_test_alloc_random_arrays(nr_vcpus, vcpu_memory_bytes >> vm->page_shift);
> +	perf_test_fill_random_arrays(nr_vcpus, vcpu_memory_bytes >> vm->page_shift);

I think this is broken if !partition_vcpu_memory_access. nr_randoms
(per-vCPU) should be `nr_vcpus * vcpu_memory_bytes >> vm->page_shift`.

Speaking of, this should probably just be done in
perf_test_setup_vcpus(). That way you can just use vcpu_args->pages for
nr_randoms, and you don't have to add another for-each-vcpu loop.
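
A rough sketch of that (untested, reusing the existing helpers):

	/*
	 * In perf_test_setup_vcpus(), in the existing per-vCPU loop,
	 * after vcpu_args->pages has been computed.
	 */
	vcpu_args->random_array = vm_vaddr_alloc(vm,
						 vcpu_args->pages * sizeof(uint32_t),
						 KVM_UTIL_MIN_VADDR);
	randoms = addr_gva2hva(vm, vcpu_args->random_array);
	for (j = 0; j < vcpu_args->pages; j++)
		randoms[j] = random();

where `randoms` is a local uint32_t * and `j` a uint64_t.
vcpu_args->pages already covers the !partition_vcpu_memory_access case,
since it is the total number of pages each vCPU accesses.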

> +
>  	/* Export the shared variables to the guest. */
>  	sync_global_to_guest(vm, perf_test_args);
>  
> @@ -229,6 +264,12 @@ void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
>  	sync_global_to_guest(vm, perf_test_args);
>  }
>  
> +void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
> +{
> +	perf_test_args.random_seed = random_seed;
> +	sync_global_to_guest(vm, perf_test_args);

sync_global_to_guest() is unnecessary here since the guest does not need
to access perf_test_args.random_seed (and I can't imagine it ever will).

That being said, I think you can drop this function (see earlier
comment).

> +}
> +
>  uint64_t __weak perf_test_nested_pages(int nr_vcpus)
>  {
>  	return 0;
> -- 
> 2.37.1.595.g718a3a8f04-goog
>
Ricardo Koller Sept. 1, 2022, 5:37 p.m. UTC | #2
On Wed, Aug 17, 2022 at 09:41:44PM +0000, Colton Lewis wrote:
> Create for each vcpu an array of random numbers to be used as a source
> of randomness when executing guest code. Randomness should be useful
> for approaching more realistic conditions in dirty_log_perf_test.
> 
> Create a -r argument to specify a random seed. If no argument
> is provided, the seed defaults to the current Unix timestamp.
> 
> The random arrays are allocated and filled as part of VM creation. The
> additional memory used is one 32-bit number per guest VM page to
> ensure one number for each iteration of the inner loop of guest_code.
> 
> Signed-off-by: Colton Lewis <coltonlewis@google.com>
> ---
>  .../selftests/kvm/dirty_log_perf_test.c       | 11 ++++-
>  .../selftests/kvm/include/perf_test_util.h    |  3 ++
>  .../selftests/kvm/lib/perf_test_util.c        | 45 ++++++++++++++++++-
>  3 files changed, 55 insertions(+), 4 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> index f99e39a672d3..362b946019e9 100644
> --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
> +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> @@ -132,6 +132,7 @@ struct test_params {
>  	bool partition_vcpu_memory_access;
>  	enum vm_mem_backing_src_type backing_src;
>  	int slots;
> +	uint32_t random_seed;
>  };
>  
>  static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
> @@ -225,6 +226,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>  				 p->slots, p->backing_src,
>  				 p->partition_vcpu_memory_access);
>  
> +	perf_test_set_random_seed(vm, p->random_seed);
>  	perf_test_set_wr_fract(vm, p->wr_fract);
>  
>  	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
> @@ -352,7 +354,7 @@ static void help(char *name)
>  {
>  	puts("");
>  	printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
> -	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
> +	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed ] [-s mem type]"
>  	       "[-x memslots]\n", name);
>  	puts("");
>  	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
> @@ -380,6 +382,7 @@ static void help(char *name)
>  	printf(" -v: specify the number of vCPUs to run.\n");
>  	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
>  	       "     them into a separate region of memory for each vCPU.\n");
> +	printf(" -r: specify the starting random seed.\n");
>  	backing_src_help("-s");
>  	printf(" -x: Split the memory region into this number of memslots.\n"
>  	       "     (default: 1)\n");
> @@ -396,6 +399,7 @@ int main(int argc, char *argv[])
>  		.partition_vcpu_memory_access = true,
>  		.backing_src = DEFAULT_VM_MEM_SRC,
>  		.slots = 1,
> +		.random_seed = time(NULL),
>  	};
>  	int opt;
>  
> @@ -406,7 +410,7 @@ int main(int argc, char *argv[])
>  
>  	guest_modes_append_default();
>  
> -	while ((opt = getopt(argc, argv, "eghi:p:m:nb:f:v:os:x:")) != -1) {
> +	while ((opt = getopt(argc, argv, "eghi:p:m:nb:f:v:or:s:x:")) != -1) {
>  		switch (opt) {
>  		case 'e':
>  			/* 'e' is for evil. */
> @@ -442,6 +446,9 @@ int main(int argc, char *argv[])
>  		case 'o':
>  			p.partition_vcpu_memory_access = false;
>  			break;
> +		case 'r':
> +			p.random_seed = atoi(optarg);
> +			break;
>  		case 's':
>  			p.backing_src = parse_backing_src_type(optarg);
>  			break;
> diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
> index eaa88df0555a..9517956424fc 100644
> --- a/tools/testing/selftests/kvm/include/perf_test_util.h
> +++ b/tools/testing/selftests/kvm/include/perf_test_util.h
> @@ -23,6 +23,7 @@ struct perf_test_vcpu_args {
>  	uint64_t gpa;
>  	uint64_t gva;
>  	uint64_t pages;
> +	vm_vaddr_t random_array;
>  
>  	/* Only used by the host userspace part of the vCPU thread */
>  	struct kvm_vcpu *vcpu;
> @@ -35,6 +36,7 @@ struct perf_test_args {
>  	uint64_t gpa;
>  	uint64_t size;
>  	uint64_t guest_page_size;
> +	uint32_t random_seed;
>  	int wr_fract;
>  
>  	/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
> @@ -52,6 +54,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
>  void perf_test_destroy_vm(struct kvm_vm *vm);
>  
>  void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
> +void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
>  
>  void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
>  void perf_test_join_vcpu_threads(int vcpus);
> diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
> index 9618b37c66f7..8d85923acb4e 100644
> --- a/tools/testing/selftests/kvm/lib/perf_test_util.c
> +++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
> @@ -70,6 +70,35 @@ void perf_test_guest_code(uint32_t vcpu_idx)
>  	}
>  }
>  
> +void perf_test_alloc_random_arrays(uint32_t nr_vcpus, uint32_t nr_randoms)
> +{
> +	struct perf_test_args *pta = &perf_test_args;
> +
> +	for (uint32_t i = 0; i < nr_vcpus; i++) {
> +		pta->vcpu_args[i].random_array = vm_vaddr_alloc(
> +			pta->vm,
> +			nr_randoms * sizeof(uint32_t),
> +			KVM_UTIL_MIN_VADDR);
> +		pr_debug("Random row %d vaddr: %p.\n", i, (void *)pta->vcpu_args[i].random_array);
> +	}
> +}
> +
> +void perf_test_fill_random_arrays(uint32_t nr_vcpus, uint32_t nr_randoms)
> +{
> +	struct perf_test_args *pta = &perf_test_args;
> +	uint32_t *host_row;
> +
> +	for (uint32_t i = 0; i < nr_vcpus; i++) {
> +		host_row = addr_gva2hva(pta->vm, pta->vcpu_args[i].random_array);
> +
> +		for (uint32_t j = 0; j < nr_randoms; j++)
> +			host_row[j] = random();
> +
> +		pr_debug("New randoms row %d: %d, %d, %d...\n",
> +			 i, host_row[0], host_row[1], host_row[2]);
> +	}
> +}
> +
>  void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
>  			   struct kvm_vcpu *vcpus[],
>  			   uint64_t vcpu_memory_bytes,
> @@ -120,8 +149,9 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
>  
>  	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
>  
> -	/* By default vCPUs will write to memory. */
> -	pta->wr_fract = 1;
> +	/* Set perf_test_args defaults. */
> +	pta->wr_fract = 100;

This is not needed in this commit. It changes the behavior at this
point in the series, as the test now defaults to 0% reads (using the
current check in guest_code()).

> +	pta->random_seed = time(NULL);
>  
>  	/*
>  	 * Snapshot the non-huge page size.  This is used by the guest code to
> @@ -211,6 +241,11 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
>  
>  	ucall_init(vm, NULL);
>  
> +	srandom(perf_test_args.random_seed);
> +	pr_info("Random seed: %d\n", perf_test_args.random_seed);
> +	perf_test_alloc_random_arrays(nr_vcpus, vcpu_memory_bytes >> vm->page_shift);
> +	perf_test_fill_random_arrays(nr_vcpus, vcpu_memory_bytes >> vm->page_shift);
> +
>  	/* Export the shared variables to the guest. */
>  	sync_global_to_guest(vm, perf_test_args);
>  
> @@ -229,6 +264,12 @@ void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
>  	sync_global_to_guest(vm, perf_test_args);
>  }
>  
> +void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
> +{
> +	perf_test_args.random_seed = random_seed;
> +	sync_global_to_guest(vm, perf_test_args);
> +}
> +
>  uint64_t __weak perf_test_nested_pages(int nr_vcpus)
>  {
>  	return 0;
> -- 
> 2.37.1.595.g718a3a8f04-goog
>
Ricardo Koller Sept. 1, 2022, 5:52 p.m. UTC | #3
On Tue, Aug 30, 2022 at 07:01:57PM +0000, Colton Lewis wrote:
> David Matlack <dmatlack@google.com> writes:
> 
> > On Wed, Aug 17, 2022 at 09:41:44PM +0000, Colton Lewis wrote:
> > > Create for each vcpu an array of random numbers to be used as a source
> > > of randomness when executing guest code. Randomness should be useful
> > > for approaching more realistic conditions in dirty_log_perf_test.
> 
> > > Create a -r argument to specify a random seed. If no argument
> > > is provided, the seed defaults to the current Unix timestamp.
> 
> > > The random arrays are allocated and filled as part of VM creation. The
> > > additional memory used is one 32-bit number per guest VM page to
> > > ensure one number for each iteration of the inner loop of guest_code.
> 
> > nit: I find it helpful to put this in more practical terms as well. e.g.
> 
> >    The random arrays are allocated and filled as part of VM creation. The
> >    additional memory used is one 32-bit number per guest VM page (so
> >    1MiB of additional memory when using the default 1GiB per-vCPU region
> >    size) to ensure one number for each iteration of the inner loop of
> >    guest_code.
> 
> > Speaking of, this commit always allocates the random arrays, even if
> > they are not used. I think that's probably fine but it deserves to be
> > called out in the commit description with an explanation of why it is
> > the way it is.
> 
> 
> I'll add a concrete example and explain always allocating.
> 
> Technically, the random array will always be accessed even if it never
> changes the code path when write_percent = 0 or write_percent =
> 100. Always allocating avoids extra code that adds complexity for no
> benefit.
> 
> > > @@ -442,6 +446,9 @@ int main(int argc, char *argv[])
> > >   		case 'o':
> > >   			p.partition_vcpu_memory_access = false;
> > >   			break;
> > > +		case 'r':
> > > +			p.random_seed = atoi(optarg);
> 
> > Putting the random seed in test_params seems unnecessary. This flag
> > could just set perf_test_args.random_seed directly. Doing this would
> > avoid the need for duplicating the time(NULL) initialization, and allow
> > you to drop perf_test_set_random_seed().
> 
> 
> I understand there is some redundancy in this approach and had
> originally done what you suggest. My reason for changing was based on
> the consideration that perf_test_util.c is library code that is used for
> other tests and could be used for more in the future, so it should
> always initialize everything in perf_test_args rather than rely on the
> test to do it. This is what is done for wr_fract (renamed write_percent
> later in this patch). perf_test_set_random_seed is there for interface
> consistency with the other perf_test_set* functions.
> 
> But per the point below, moving random generation to VM creation means
> we are dependent on the seed being set before then. So I do need a
> change here, but I'd rather keep the redundancy and
> perf_test_set_random_seed.
> 
> > > +void perf_test_fill_random_arrays(uint32_t nr_vcpus, uint32_t
> > > nr_randoms)
> > > +{
> > > +	struct perf_test_args *pta = &perf_test_args;
> > > +	uint32_t *host_row;
> 
> > "host_row" is a confusing variable name here since you are no longer
> > operating on a 2D array. Each vCPU has its own array.
> 
> 
> Agreed.
> 
> > > @@ -120,8 +149,9 @@ struct kvm_vm *perf_test_create_vm(enum
> > > vm_guest_mode mode, int nr_vcpus,
> 
> > >   	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
> 
> > > -	/* By default vCPUs will write to memory. */
> > > -	pta->wr_fract = 1;
> > > +	/* Set perf_test_args defaults. */
> > > +	pta->wr_fract = 100;
> > > +	pta->random_seed = time(NULL);
> 
> > Won't this overwrite the random_seed provided by the test?
> 
> 
> Yes. I had thought I accounted for that by setting random_seed later in
> run_test, but now realize that has no effect because I moved the
> generation of all the random numbers to happen before then.
> 
> 
> > >   	/*
> > >   	 * Snapshot the non-huge page size.  This is used by the guest code to
> > > @@ -211,6 +241,11 @@ struct kvm_vm *perf_test_create_vm(enum
> > > vm_guest_mode mode, int nr_vcpus,
> 
> > >   	ucall_init(vm, NULL);
> 
> > > +	srandom(perf_test_args.random_seed);
> > > +	pr_info("Random seed: %d\n", perf_test_args.random_seed);
> > > +	perf_test_alloc_random_arrays(nr_vcpus, vcpu_memory_bytes >>
> > > vm->page_shift);
> > > +	perf_test_fill_random_arrays(nr_vcpus, vcpu_memory_bytes >>
> > > vm->page_shift);
> 
> > I think this is broken if !partition_vcpu_memory_access. nr_randoms
> > (per-vCPU) should be `nr_vcpus * vcpu_memory_bytes >> vm->page_shift`.
> 
> 
> Agree it will break then and should not. But allocating that many more
> random numbers may eat too much memory. In a test with 64 vcpus, it would
> try to allocate 64x64 times as many random numbers. I'll try it but may
> need something different in that case.

You might want to reconsider the idea of using a random number generator
inside the guest. IIRC the reasons against it were: quality of the random
numbers, and that some random generators use floating-point numbers. I
don't think the first one is a big issue. The second one might be an
issue if we want to generate non-uniform distributions (e.g., poisson);
but not a problem for now.
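
For what it's worth, an integer-only generator can be tiny, e.g. an
xorshift32 variant (a sketch, not from this series; state must start
non-zero):

	static uint32_t guest_random(uint32_t *state)
	{
		uint32_t x = *state;

		x ^= x << 13;
		x ^= x >> 17;
		x ^= x << 5;
		return *state = x;
	}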

> 
> > Speaking of, this should probably just be done in
> > perf_test_setup_vcpus(). That way you can just use vcpu_args->pages for
> > nr_randoms, and you don't have to add another for-each-vcpu loop.
> 
> 
> Agreed.
> 
> > > +
> > >   	/* Export the shared variables to the guest. */
> > >   	sync_global_to_guest(vm, perf_test_args);
> 
> > > @@ -229,6 +264,12 @@ void perf_test_set_wr_fract(struct kvm_vm *vm,
> > > int wr_fract)
> > >   	sync_global_to_guest(vm, perf_test_args);
> > >   }
> 
> > > +void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
> > > +{
> > > +	perf_test_args.random_seed = random_seed;
> > > +	sync_global_to_guest(vm, perf_test_args);
> 
> > sync_global_to_guest() is unnecessary here since the guest does not need
> > to access perf_test_args.random_seed (and I can't imagine it ever will).
> 
> > That being said, I think you can drop this function (see earlier
> > comment).
> 
> 
> See earlier response about consistency.
Sean Christopherson Sept. 1, 2022, 6:22 p.m. UTC | #4
On Thu, Sep 01, 2022, Ricardo Koller wrote:
> On Tue, Aug 30, 2022 at 07:01:57PM +0000, Colton Lewis wrote:
> > David Matlack <dmatlack@google.com> writes:
> > > I think this is broken if !partition_vcpu_memory_access. nr_randoms
> > > (per-vCPU) should be `nr_vcpus * vcpu_memory_bytes >> vm->page_shift`.
> > 
> > 
> > Agree it will break then and should not. But allocating that many more
> > random numbers may eat too much memory. In a test with 64 vcpus, it would
> > try to allocate 64x64 times as many random numbers. I'll try it but may
> > need something different in that case.
> 
> You might want to reconsider the idea of using a random number generator
> inside the guest. IIRC the reasons against it were: quality of the random
> numbers, and that some random generators use floating-point numbers. I
> don't think the first one is a big issue. The second one might be an
> issue if we want to generate non-uniform distributions (e.g., poisson);
> but not a problem for now.

I'm pretty sure I had coded up a pseudo-RNG framework for selftests at one point, but
I can't find the code in my morass of branches and stashes :-(
 
Whatever we do, make sure the randomness and thus any failures are easily
reproducible, i.e. the RNG needs to be a seeded pseudo-RNG, not fully random.
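
One way to keep that property (sketch): derive each vCPU's generator
state deterministically from the -r seed, e.g.

	/* Same -r seed => same per-vCPU sequence => reproducible runs. */
	uint32_t state = perf_test_args.random_seed + vcpu_idx + 1;

(the + 1 only avoids a zero state, which a generator like xorshift
cannot use, when -r 0 is passed for vCPU index 0).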
Andrew Jones Sept. 2, 2022, 11:20 a.m. UTC | #5
On Thu, Sep 01, 2022 at 06:22:05PM +0000, Sean Christopherson wrote:
> On Thu, Sep 01, 2022, Ricardo Koller wrote:
> > On Tue, Aug 30, 2022 at 07:01:57PM +0000, Colton Lewis wrote:
> > > David Matlack <dmatlack@google.com> writes:
> > > > I think this is broken if !partition_vcpu_memory_access. nr_randoms
> > > > (per-vCPU) should be `nr_vcpus * vcpu_memory_bytes >> vm->page_shift`.
> > > 
> > > 
> > > Agree it will break then and should not. But allocating that many more
> > > random numbers may eat too much memory. In a test with 64 vcpus, it would
> > > try to allocate 64x64 times as many random numbers. I'll try it but may
> > > need something different in that case.
> > 
> > You might want to reconsider the idea of using a random number generator
> > inside the guest. IIRC the reasons against it were: quality of the random
> > numbers, and that some random generators use floating-point numbers. I
> > don't think the first one is a big issue. The second one might be an
> > issue if we want to generate non-uniform distributions (e.g., poisson);
> > but not a problem for now.
> 
> I'm pretty sure I had coded up a pseudo-RNG framework for selftests at one point, but
> I can't find the code in my morass of branches and stashes :-(

Here's an option that was once proposed by Alex Bennee for kvm-unit-tests.
(Someday we need to revisit that MTTCG series...)

https://gitlab.com/rhdrjones/kvm-unit-tests/-/commit/2fc51790577c9f91214f72ef51ee5d0e8bbb1b7e

Thanks,
drew

>  
> Whatever we do, make sure the randomness and thus any failures are easily
> reproducible, i.e. the RNG needs to be a seeded pseudo-RNG, not fully random.

Patch

diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index f99e39a672d3..362b946019e9 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -132,6 +132,7 @@  struct test_params {
 	bool partition_vcpu_memory_access;
 	enum vm_mem_backing_src_type backing_src;
 	int slots;
+	uint32_t random_seed;
 };
 
 static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
@@ -225,6 +226,7 @@  static void run_test(enum vm_guest_mode mode, void *arg)
 				 p->slots, p->backing_src,
 				 p->partition_vcpu_memory_access);
 
+	perf_test_set_random_seed(vm, p->random_seed);
 	perf_test_set_wr_fract(vm, p->wr_fract);
 
 	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
@@ -352,7 +354,7 @@  static void help(char *name)
 {
 	puts("");
 	printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
-	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
+	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed ] [-s mem type]"
 	       "[-x memslots]\n", name);
 	puts("");
 	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
@@ -380,6 +382,7 @@  static void help(char *name)
 	printf(" -v: specify the number of vCPUs to run.\n");
 	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
 	       "     them into a separate region of memory for each vCPU.\n");
+	printf(" -r: specify the starting random seed.\n");
 	backing_src_help("-s");
 	printf(" -x: Split the memory region into this number of memslots.\n"
 	       "     (default: 1)\n");
@@ -396,6 +399,7 @@  int main(int argc, char *argv[])
 		.partition_vcpu_memory_access = true,
 		.backing_src = DEFAULT_VM_MEM_SRC,
 		.slots = 1,
+		.random_seed = time(NULL),
 	};
 	int opt;
 
@@ -406,7 +410,7 @@  int main(int argc, char *argv[])
 
 	guest_modes_append_default();
 
-	while ((opt = getopt(argc, argv, "eghi:p:m:nb:f:v:os:x:")) != -1) {
+	while ((opt = getopt(argc, argv, "eghi:p:m:nb:f:v:or:s:x:")) != -1) {
 		switch (opt) {
 		case 'e':
 			/* 'e' is for evil. */
@@ -442,6 +446,9 @@  int main(int argc, char *argv[])
 		case 'o':
 			p.partition_vcpu_memory_access = false;
 			break;
+		case 'r':
+			p.random_seed = atoi(optarg);
+			break;
 		case 's':
 			p.backing_src = parse_backing_src_type(optarg);
 			break;
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
index eaa88df0555a..9517956424fc 100644
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -23,6 +23,7 @@  struct perf_test_vcpu_args {
 	uint64_t gpa;
 	uint64_t gva;
 	uint64_t pages;
+	vm_vaddr_t random_array;
 
 	/* Only used by the host userspace part of the vCPU thread */
 	struct kvm_vcpu *vcpu;
@@ -35,6 +36,7 @@  struct perf_test_args {
 	uint64_t gpa;
 	uint64_t size;
 	uint64_t guest_page_size;
+	uint32_t random_seed;
 	int wr_fract;
 
 	/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
@@ -52,6 +54,7 @@  struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 void perf_test_destroy_vm(struct kvm_vm *vm);
 
 void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
+void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
 
 void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
 void perf_test_join_vcpu_threads(int vcpus);
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 9618b37c66f7..8d85923acb4e 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -70,6 +70,35 @@  void perf_test_guest_code(uint32_t vcpu_idx)
 	}
 }
 
+void perf_test_alloc_random_arrays(uint32_t nr_vcpus, uint32_t nr_randoms)
+{
+	struct perf_test_args *pta = &perf_test_args;
+
+	for (uint32_t i = 0; i < nr_vcpus; i++) {
+		pta->vcpu_args[i].random_array = vm_vaddr_alloc(
+			pta->vm,
+			nr_randoms * sizeof(uint32_t),
+			KVM_UTIL_MIN_VADDR);
+		pr_debug("Random row %d vaddr: %p.\n", i, (void *)pta->vcpu_args[i].random_array);
+	}
+}
+
+void perf_test_fill_random_arrays(uint32_t nr_vcpus, uint32_t nr_randoms)
+{
+	struct perf_test_args *pta = &perf_test_args;
+	uint32_t *host_row;
+
+	for (uint32_t i = 0; i < nr_vcpus; i++) {
+		host_row = addr_gva2hva(pta->vm, pta->vcpu_args[i].random_array);
+
+		for (uint32_t j = 0; j < nr_randoms; j++)
+			host_row[j] = random();
+
+		pr_debug("New randoms row %d: %d, %d, %d...\n",
+			 i, host_row[0], host_row[1], host_row[2]);
+	}
+}
+
 void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
 			   struct kvm_vcpu *vcpus[],
 			   uint64_t vcpu_memory_bytes,
@@ -120,8 +149,9 @@  struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 
 	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
-	/* By default vCPUs will write to memory. */
-	pta->wr_fract = 1;
+	/* Set perf_test_args defaults. */
+	pta->wr_fract = 100;
+	pta->random_seed = time(NULL);
 
 	/*
 	 * Snapshot the non-huge page size.  This is used by the guest code to
@@ -211,6 +241,11 @@  struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 
 	ucall_init(vm, NULL);
 
+	srandom(perf_test_args.random_seed);
+	pr_info("Random seed: %d\n", perf_test_args.random_seed);
+	perf_test_alloc_random_arrays(nr_vcpus, vcpu_memory_bytes >> vm->page_shift);
+	perf_test_fill_random_arrays(nr_vcpus, vcpu_memory_bytes >> vm->page_shift);
+
 	/* Export the shared variables to the guest. */
 	sync_global_to_guest(vm, perf_test_args);
 
@@ -229,6 +264,12 @@  void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
 	sync_global_to_guest(vm, perf_test_args);
 }
 
+void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
+{
+	perf_test_args.random_seed = random_seed;
+	sync_global_to_guest(vm, perf_test_args);
+}
+
 uint64_t __weak perf_test_nested_pages(int nr_vcpus)
 {
 	return 0;