[27/34] KVM: selftests: Introduce VM "shape" to allow tests to specify the VM type

Message ID: 20231105163040.14904-28-pbonzini@redhat.com (mailing list archive)
State: New, archived
Series: KVM: guest_memfd() and per-page attributes

Commit Message

Paolo Bonzini Nov. 5, 2023, 4:30 p.m. UTC
From: Sean Christopherson <seanjc@google.com>

Add a "vm_shape" structure to encapsulate the selftests-defined "mode",
along with the KVM-defined "type" for use when creating a new VM.  "mode"
tracks physical and virtual address properties, as well as the preferred
backing memory type, while "type" corresponds to the VM type.

Taking the VM type will allow adding tests for KVM_CREATE_GUEST_MEMFD,
a.k.a. guest private memory, without needing an entirely separate set of
helpers.  Guest private memory is effectively usable only by confidential
VM types, and it's expected that x86 will double down and require unique
VM types for TDX and SNP guests.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20231027182217.3615211-30-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 tools/testing/selftests/kvm/dirty_log_test.c  |  2 +-
 .../selftests/kvm/include/kvm_util_base.h     | 54 +++++++++++++++----
 .../selftests/kvm/kvm_page_table_test.c       |  2 +-
 tools/testing/selftests/kvm/lib/kvm_util.c    | 43 +++++++--------
 tools/testing/selftests/kvm/lib/memstress.c   |  3 +-
 .../kvm/x86_64/ucna_injection_test.c          |  2 +-
 6 files changed, 72 insertions(+), 34 deletions(-)
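
For illustration, a test that wants a non-default VM type could do
something like the sketch below; guest_code is a placeholder guest
function, and KVM_X86_SW_PROTECTED_VM stands in for the x86 VM type
this series adds.

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = KVM_X86_SW_PROTECTED_VM,	/* assumed VM type */
	};

	/*
	 * Existing callers are unchanged: VM_SHAPE_DEFAULT expands to
	 * VM_SHAPE(VM_MODE_DEFAULT), i.e. type == VM_TYPE_DEFAULT (0).
	 */
	vm = vm_create_shape_with_one_vcpu(shape, &vcpu, guest_code);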

Comments

Fuad Tabba Nov. 6, 2023, 11:54 a.m. UTC | #1
On Sun, Nov 5, 2023 at 4:34 PM Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> From: Sean Christopherson <seanjc@google.com>
>
> Add a "vm_shape" structure to encapsulate the selftests-defined "mode",
> along with the KVM-defined "type" for use when creating a new VM.  "mode"
> tracks physical and virtual address properties, as well as the preferred
> backing memory type, while "type" corresponds to the VM type.
>
> Taking the VM type will allow adding tests for KVM_CREATE_GUEST_MEMFD,
> a.k.a. guest private memory, without needing an entirely separate set of
> helpers.  Guest private memory is effectively usable only by confidential
> VM types, and it's expected that x86 will double down and require unique
> VM types for TDX and SNP guests.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> Message-Id: <20231027182217.3615211-30-seanjc@google.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---

nit: as in prior selftest commit messages, the commit message refers to
guest _private_ memory. Should these references be changed to just
guest memory?

Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>

Cheers,
/fuad

Sean Christopherson Nov. 6, 2023, 4:04 p.m. UTC | #2
On Mon, Nov 06, 2023, Fuad Tabba wrote:
> On Sun, Nov 5, 2023 at 4:34 PM Paolo Bonzini <pbonzini@redhat.com> wrote:
> >
> > From: Sean Christopherson <seanjc@google.com>
> >
> > Add a "vm_shape" structure to encapsulate the selftests-defined "mode",
> > along with the KVM-defined "type" for use when creating a new VM.  "mode"
> > tracks physical and virtual address properties, as well as the preferred
> > backing memory type, while "type" corresponds to the VM type.
> >
> > Taking the VM type will allow adding tests for KVM_CREATE_GUEST_MEMFD,
> > a.k.a. guest private memory, without needing an entirely separate set of
> > helpers.  Guest private memory is effectively usable only by confidential
> > VM types, and it's expected that x86 will double down and require unique
> > VM types for TDX and SNP guests.
> >
> > Signed-off-by: Sean Christopherson <seanjc@google.com>
> > Message-Id: <20231027182217.3615211-30-seanjc@google.com>
> > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> > ---
> 
> nit: as in prior selftest commit messages, the commit message refers to
> guest _private_ memory. Should these references be changed to just
> guest memory?

Hmm, no, "private" is mostly appropriate here.  At this point in time, only x86
supports KVM_CREATE_GUEST_MEMFD, and x86 only supports it for private memory.
And the purpose of letting x86 selftests specify KVM_X86_SW_PROTECTED_VM, i.e.
the reason this patch exists, is purely to get private memory.

Maybe tweak the second paragraph to this?

Taking the VM type will allow adding tests for KVM_CREATE_GUEST_MEMFD
without needing an entirely separate set of helpers.  At this time,
guest_memfd is effectively usable only by confidential VM types in the
form of guest private memory, and it's expected that x86 will double down
and require unique VM types for TDX and SNP guests.
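
E.g. a minimal sketch of such a private-memory test (assuming the
vm_create_guest_memfd() helper and its (size, flags) signature from
earlier in this series):

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int gmem_fd;

	vm = vm_create_shape_with_one_vcpu(
		(struct vm_shape){ .mode = VM_MODE_DEFAULT,
				   .type = KVM_X86_SW_PROTECTED_VM, },
		&vcpu, guest_code);

	/* Assumed helper: wraps the KVM_CREATE_GUEST_MEMFD ioctl on the VM fd. */
	gmem_fd = vm_create_guest_memfd(vm, 0x200000 /* 2 MiB */, 0);
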
Fuad Tabba Nov. 6, 2023, 4:17 p.m. UTC | #3
On Mon, Nov 6, 2023 at 4:04 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Mon, Nov 06, 2023, Fuad Tabba wrote:
> >
> > nit: as in prior selftest commit messages, the commit message refers to
> > guest _private_ memory. Should these references be changed to just
> > guest memory?
>
> Hmm, no, "private" is mostly appropriate here.  At this point in time, only x86
> supports KVM_CREATE_GUEST_MEMFD, and x86 only supports it for private memory.
> And the purpose of letting x86 selftests specify KVM_X86_SW_PROTECTED_VM, i.e.
> the reason this patch exists, is purely to get private memory.
>
> Maybe tweak the second paragraph to this?
>
> Taking the VM type will allow adding tests for KVM_CREATE_GUEST_MEMFD
> without needing an entirely separate set of helpers.  At this time,
> guest_memfd is effectively usable only by confidential VM types in the
> form of guest private memory, and it's expected that x86 will double down
> and require unique VM types for TDX and SNP guests.

sgtm
/fuad
Anish Moorthy Nov. 8, 2023, 5 p.m. UTC | #4
This commit breaks the arm64 selftests build btw: looks like a simple oversight?

$ cd ${LINUX_ROOT}/tools/testing/selftests/kvm
$ CROSS_COMPILE=aarch64-linux-gnu- ARCH=arm64 make
# ...
aarch64/page_fault_test.c: In function ‘run_test’:
aarch64/page_fault_test.c:708:28: error: incompatible type for
argument 1 of ‘____vm_create’
  708 |         vm = ____vm_create(mode);
      |                            ^~~~
      |                            |
      |                            enum vm_guest_mode
In file included from include/kvm_util.h:10,
                 from aarch64/page_fault_test.c:14:
include/kvm_util_base.h:806:46: note: expected ‘struct vm_shape’ but
argument is of type ‘enum vm_guest_mode’
  806 | struct kvm_vm *____vm_create(struct vm_shape shape);
Anish Moorthy Nov. 8, 2023, 11:37 p.m. UTC | #5
On Wed, Nov 8, 2023 at 9:00 AM Anish Moorthy <amoorthy@google.com> wrote:
>
> This commit breaks the arm64 selftests build btw: looks like a simple oversight?

Yup, fix is a one-liner. Posted below.

diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
index eb4217b7c768..08a5ca5bed56 100644
--- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c
+++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
@@ -705,7 +705,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
 	print_test_banner(mode, p);
 
-	vm = ____vm_create(mode);
+	vm = ____vm_create(VM_SHAPE(mode));
 	setup_memslots(vm, p);
 	kvm_vm_elf_load(vm, program_invocation_name);
 	setup_ucall(vm);
Paolo Bonzini Nov. 9, 2023, 8:25 a.m. UTC | #6
On 11/9/23 00:37, Anish Moorthy wrote:
> On Wed, Nov 8, 2023 at 9:00 AM Anish Moorthy <amoorthy@google.com> wrote:
>>
>> This commit breaks the arm64 selftests build btw: looks like a simple oversight?
> 
> Yup, fix is a one-liner. Posted below.
> 
> diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
> index eb4217b7c768..08a5ca5bed56 100644
> --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c
> +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
> @@ -705,7 +705,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>   
>   	print_test_banner(mode, p);
>   
> -	vm = ____vm_create(mode);
> +	vm = ____vm_create(VM_SHAPE(mode));

Yes, this is similar to the s390 patch I sent yesterday 
(https://patchew.org/linux/20231108094055.221234-1-pbonzini@redhat.com/).

Paolo

Patch

diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 936f3a8d1b83..6cbecf499767 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -699,7 +699,7 @@  static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
 
 	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
-	vm = __vm_create(mode, 1, extra_mem_pages);
+	vm = __vm_create(VM_SHAPE(mode), 1, extra_mem_pages);
 
 	log_mode_create_vm_done(vm);
 	*vcpu = vm_vcpu_add(vm, 0, guest_code);
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 1441fca6c273..157508c071f3 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -188,6 +188,23 @@  enum vm_guest_mode {
 	NUM_VM_MODES,
 };
 
+struct vm_shape {
+	enum vm_guest_mode mode;
+	unsigned int type;
+};
+
+#define VM_TYPE_DEFAULT			0
+
+#define VM_SHAPE(__mode)			\
+({						\
+	struct vm_shape shape = {		\
+		.mode = (__mode),		\
+		.type = VM_TYPE_DEFAULT		\
+	};					\
+						\
+	shape;					\
+})
+
 #if defined(__aarch64__)
 
 extern enum vm_guest_mode vm_mode_default;
@@ -220,6 +237,8 @@  extern enum vm_guest_mode vm_mode_default;
 
 #endif
 
+#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)
+
 #define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
 #define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)
 
@@ -784,21 +803,21 @@  vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
  * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
  * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
  */
-struct kvm_vm *____vm_create(enum vm_guest_mode mode);
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+struct kvm_vm *____vm_create(struct vm_shape shape);
+struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 			   uint64_t nr_extra_pages);
 
 static inline struct kvm_vm *vm_create_barebones(void)
 {
-	return ____vm_create(VM_MODE_DEFAULT);
+	return ____vm_create(VM_SHAPE_DEFAULT);
 }
 
 static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
 {
-	return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
+	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
 }
 
-struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
 				      uint64_t extra_mem_pages,
 				      void *guest_code, struct kvm_vcpu *vcpus[]);
 
@@ -806,17 +825,27 @@  static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
 						  void *guest_code,
 						  struct kvm_vcpu *vcpus[])
 {
-	return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
+	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
 				      guest_code, vcpus);
 }
 
+
+struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
+					       struct kvm_vcpu **vcpu,
+					       uint64_t extra_mem_pages,
+					       void *guest_code);
+
 /*
  * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
  * additional pages of guest memory.  Returns the VM and vCPU (via out param).
  */
-struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-					 uint64_t extra_mem_pages,
-					 void *guest_code);
+static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+						       uint64_t extra_mem_pages,
+						       void *guest_code)
+{
+	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
+					       extra_mem_pages, guest_code);
+}
 
 static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 						     void *guest_code)
@@ -824,6 +853,13 @@  static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
 }
 
+static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
+							   struct kvm_vcpu **vcpu,
+							   void *guest_code)
+{
+	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
+}
+
 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
 
 void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 69f26d80c821..e37dc9c21888 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -254,7 +254,7 @@  static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 
 	/* Create a VM with enough guest pages */
 	guest_num_pages = test_mem_size / guest_page_size;
-	vm = __vm_create_with_vcpus(mode, nr_vcpus, guest_num_pages,
+	vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus, guest_num_pages,
 				    guest_code, test_args.vcpus);
 
 	/* Align down GPA of the testing memslot */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 95a553400ea9..1c74310f1d44 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -209,7 +209,7 @@  __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
 		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
 }
 
-struct kvm_vm *____vm_create(enum vm_guest_mode mode)
+struct kvm_vm *____vm_create(struct vm_shape shape)
 {
 	struct kvm_vm *vm;
 
@@ -221,13 +221,13 @@  struct kvm_vm *____vm_create(enum vm_guest_mode mode)
 	vm->regions.hva_tree = RB_ROOT;
 	hash_init(vm->regions.slot_hash);
 
-	vm->mode = mode;
-	vm->type = 0;
+	vm->mode = shape.mode;
+	vm->type = shape.type;
 
-	vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
-	vm->va_bits = vm_guest_mode_params[mode].va_bits;
-	vm->page_size = vm_guest_mode_params[mode].page_size;
-	vm->page_shift = vm_guest_mode_params[mode].page_shift;
+	vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
+	vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
+	vm->page_size = vm_guest_mode_params[vm->mode].page_size;
+	vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;
 
 	/* Setup mode specific traits. */
 	switch (vm->mode) {
@@ -265,7 +265,7 @@  struct kvm_vm *____vm_create(enum vm_guest_mode mode)
 		/*
 		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
 		 * it doesn't take effect unless a CR4.LA57 is set, which it
-		 * isn't for this VM_MODE.
+		 * isn't for this mode (48-bit virtual address space).
 		 */
 		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
 			    "Linear address width (%d bits) not supported",
@@ -285,10 +285,11 @@  struct kvm_vm *____vm_create(enum vm_guest_mode mode)
 		vm->pgtable_levels = 5;
 		break;
 	default:
-		TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
+		TEST_FAIL("Unknown guest mode: 0x%x", vm->mode);
 	}
 
 #ifdef __aarch64__
+	TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types");
 	if (vm->pa_bits != 40)
 		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
 #endif
@@ -347,19 +348,19 @@  static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
 	return vm_adjust_num_guest_pages(mode, nr_pages);
 }
 
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 			   uint64_t nr_extra_pages)
 {
-	uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
+	uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
 						 nr_extra_pages);
 	struct userspace_mem_region *slot0;
 	struct kvm_vm *vm;
 	int i;
 
-	pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
-		 vm_guest_mode_string(mode), nr_pages);
+	pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
+		 vm_guest_mode_string(shape.mode), shape.type, nr_pages);
 
-	vm = ____vm_create(mode);
+	vm = ____vm_create(shape);
 
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
 	for (i = 0; i < NR_MEM_REGIONS; i++)
@@ -400,7 +401,7 @@  struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
  * extra_mem_pages is only used to calculate the maximum page table size,
  * no real memory allocation for non-slot0 memory in this function.
  */
-struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
 				      uint64_t extra_mem_pages,
 				      void *guest_code, struct kvm_vcpu *vcpus[])
 {
@@ -409,7 +410,7 @@  struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus
 
 	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
 
-	vm = __vm_create(mode, nr_vcpus, extra_mem_pages);
+	vm = __vm_create(shape, nr_vcpus, extra_mem_pages);
 
 	for (i = 0; i < nr_vcpus; ++i)
 		vcpus[i] = vm_vcpu_add(vm, i, guest_code);
@@ -417,15 +418,15 @@  struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus
 	return vm;
 }
 
-struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-					 uint64_t extra_mem_pages,
-					 void *guest_code)
+struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
+					       struct kvm_vcpu **vcpu,
+					       uint64_t extra_mem_pages,
+					       void *guest_code)
 {
 	struct kvm_vcpu *vcpus[1];
 	struct kvm_vm *vm;
 
-	vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
-				    guest_code, vcpus);
+	vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus);
 
 	*vcpu = vcpus[0];
 	return vm;
diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c
index df457452d146..d05487e5a371 100644
--- a/tools/testing/selftests/kvm/lib/memstress.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -168,7 +168,8 @@  struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 	 * The memory is also added to memslot 0, but that's a benign side
 	 * effect as KVM allows aliasing HVAs in meslots.
 	 */
-	vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
+	vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus,
+				    slot0_pages + guest_num_pages,
 				    memstress_guest_code, vcpus);
 
 	args->vm = vm;
diff --git a/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c b/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
index 85f34ca7e49e..0ed32ec903d0 100644
--- a/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
+++ b/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
@@ -271,7 +271,7 @@  int main(int argc, char *argv[])
 
 	kvm_check_cap(KVM_CAP_MCE);
 
-	vm = __vm_create(VM_MODE_DEFAULT, 3, 0);
+	vm = __vm_create(VM_SHAPE_DEFAULT, 3, 0);
 
 	kvm_ioctl(vm->kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED,
 		  &supported_mcg_caps);