[V1,4/6] KVM: selftests: x86: Execute VMs with private memory

Message ID 20221111014244.1714148-5-vannapurve@google.com (mailing list archive)
State New
Series selftests: KVM: selftests for fd-based private memory

Commit Message

Vishal Annapurve Nov. 11, 2022, 1:42 a.m. UTC
Introduce a set of APIs to execute a VM with private memslots.

Host userspace APIs for:
1) Setting up and executing a VM with private memslots
2) Backing/unbacking guest private memory

Guest APIs for:
1) Changing memory mapping type

Signed-off-by: Vishal Annapurve <vannapurve@google.com>
---
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../kvm/include/x86_64/private_mem.h          |  37 +++
 .../selftests/kvm/lib/x86_64/private_mem.c    | 211 ++++++++++++++++++
 3 files changed, 249 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/include/x86_64/private_mem.h
 create mode 100644 tools/testing/selftests/kvm/lib/x86_64/private_mem.c
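
For illustration, a minimal selftest on top of these APIs could look like
the sketch below. This is hypothetical caller code, not part of the patch:
the constants and the test_guest_code()/test_ioexit() names are invented.

#include <kvm_util.h>
#include <private_mem.h>

#define TEST_AREA_GPA	0xC0000000
#define TEST_AREA_SIZE	(2UL * 1024 * 1024)
#define TEST_AREA_SLOT	10

/* Guest: convert the test area to shared and back to private. */
static void test_guest_code(void)
{
	kvm_hypercall_map_shared(TEST_AREA_GPA, TEST_AREA_SIZE);
	/* ... access the area as shared memory ... */
	kvm_hypercall_map_private(TEST_AREA_GPA, TEST_AREA_SIZE);
	/* ... access the area as private memory ... */
	GUEST_DONE();
}

/* Host: invoked by the run loop for every UCALL_SYNC from the guest. */
static void test_ioexit(struct kvm_vm *vm, uint32_t uc_arg1)
{
}

int main(int argc, char *argv[])
{
	struct vm_setup_info info = {
		.test_mem_src = VM_MEM_SRC_ANONYMOUS,
		.test_info = {
			.test_area_gpa = TEST_AREA_GPA,
			.test_area_size = TEST_AREA_SIZE,
			.test_area_slot = TEST_AREA_SLOT,
		},
		.guest_fn = test_guest_code,
		.ioexit_cb = test_ioexit,
	};

	execute_vm_with_private_test_mem(&info);
	return 0;
}

Each guest-side conversion triggers a KVM_EXIT_HYPERCALL that the run loop
in this patch turns into an allocate/unback of the restricted fd backing
the memslot.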

Comments

Peter Gonda Nov. 14, 2022, 7:37 p.m. UTC | #1
On Thu, Nov 10, 2022 at 6:43 PM Vishal Annapurve <vannapurve@google.com> wrote:
>
> Introduce a set of APIs to execute a VM with private memslots.
>
> Host userspace APIs for:
> 1) Setting up and executing a VM with private memslots
> 2) Backing/unbacking guest private memory
>
> Guest APIs for:
> 1) Changing memory mapping type
>
> Signed-off-by: Vishal Annapurve <vannapurve@google.com>
> ---
>  tools/testing/selftests/kvm/Makefile          |   1 +
>  .../kvm/include/x86_64/private_mem.h          |  37 +++
>  .../selftests/kvm/lib/x86_64/private_mem.c    | 211 ++++++++++++++++++
>  3 files changed, 249 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/include/x86_64/private_mem.h
>  create mode 100644 tools/testing/selftests/kvm/lib/x86_64/private_mem.c
>
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index 0172eb6cb6ee..57385ad58527 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -53,6 +53,7 @@ LIBKVM_STRING += lib/string_override.c
>  LIBKVM_x86_64 += lib/x86_64/apic.c
>  LIBKVM_x86_64 += lib/x86_64/handlers.S
>  LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
> +LIBKVM_x86_64 += lib/x86_64/private_mem.c
>  LIBKVM_x86_64 += lib/x86_64/processor.c
>  LIBKVM_x86_64 += lib/x86_64/svm.c
>  LIBKVM_x86_64 += lib/x86_64/ucall.c
> diff --git a/tools/testing/selftests/kvm/include/x86_64/private_mem.h b/tools/testing/selftests/kvm/include/x86_64/private_mem.h
> new file mode 100644
> index 000000000000..e556ded971fd
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/include/x86_64/private_mem.h
> @@ -0,0 +1,37 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2022, Google LLC.
> + */
> +
> +#ifndef SELFTEST_KVM_PRIVATE_MEM_H
> +#define SELFTEST_KVM_PRIVATE_MEM_H
> +
> +#include <stdint.h>
> +#include <kvm_util.h>
> +
> +void kvm_hypercall_map_shared(uint64_t gpa, uint64_t size);
> +void kvm_hypercall_map_private(uint64_t gpa, uint64_t size);
> +
> +void vm_unback_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size);
> +
> +void vm_allocate_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size);
> +
> +typedef void (*guest_code_fn)(void);
> +typedef void (*io_exit_handler)(struct kvm_vm *vm, uint32_t uc_arg1);
> +
> +struct test_setup_info {
> +       uint64_t test_area_gpa;
> +       uint64_t test_area_size;
> +       uint32_t test_area_slot;
> +};
> +
> +struct vm_setup_info {
> +       enum vm_mem_backing_src_type test_mem_src;
> +       struct test_setup_info test_info;
> +       guest_code_fn guest_fn;
> +       io_exit_handler ioexit_cb;
> +};
> +
> +void execute_vm_with_private_test_mem(struct vm_setup_info *info);
> +
> +#endif /* SELFTEST_KVM_PRIVATE_MEM_H */
> diff --git a/tools/testing/selftests/kvm/lib/x86_64/private_mem.c b/tools/testing/selftests/kvm/lib/x86_64/private_mem.c
> new file mode 100644
> index 000000000000..3076cae81804
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/lib/x86_64/private_mem.c
> @@ -0,0 +1,211 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * tools/testing/selftests/kvm/lib/x86_64/private_mem.c
> + *
> + * Copyright (C) 2022, Google LLC.
> + */
> +#define _GNU_SOURCE /* for program_invocation_name */
> +#include <fcntl.h>
> +#include <limits.h>
> +#include <sched.h>
> +#include <signal.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <sys/ioctl.h>
> +
> +#include <linux/compiler.h>
> +#include <linux/kernel.h>
> +#include <linux/kvm_para.h>
> +
> +#include <test_util.h>
> +#include <kvm_util.h>
> +#include <private_mem.h>
> +#include <processor.h>
> +
> +static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
> +       uint64_t flags)
> +{
> +       return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
> +}
> +
> +static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
> +       uint64_t flags)
> +{
> +       uint64_t ret;
> +
> +       GUEST_ASSERT_2(IS_PAGE_ALIGNED(gpa) && IS_PAGE_ALIGNED(size), gpa, size);
> +
> +       ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
> +       GUEST_ASSERT_1(!ret, ret);
> +}
> +
> +void kvm_hypercall_map_shared(uint64_t gpa, uint64_t size)
> +{
> +       kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_DECRYPTED);
> +}
> +
> +void kvm_hypercall_map_private(uint64_t gpa, uint64_t size)
> +{
> +       kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_ENCRYPTED);
> +}
> +
> +static void vm_update_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
> +       bool unback_mem)
> +{
> +       int restricted_fd;
> +       uint64_t restricted_fd_offset, guest_phys_base, fd_offset;
> +       struct kvm_enc_region enc_region;
> +       struct kvm_userspace_memory_region_ext *region_ext;
> +       struct kvm_userspace_memory_region *region;
> +       int fallocate_mode = 0;
> +       int ret;
> +
> +       region_ext = kvm_userspace_memory_region_ext_find(vm, gpa, gpa + size);
> +       TEST_ASSERT(region_ext != NULL, "Region not found");
> +       region = &region_ext->region;
> +       TEST_ASSERT(region->flags & KVM_MEM_PRIVATE,
> +               "Can not update private memfd for non-private memslot\n");
> +       restricted_fd = region_ext->restricted_fd;
> +       restricted_fd_offset = region_ext->restricted_offset;
> +       guest_phys_base = region->guest_phys_addr;
> +       fd_offset = restricted_fd_offset + (gpa - guest_phys_base);
> +
> +       if (unback_mem)
> +               fallocate_mode = (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
> +
> +       printf("restricted_fd %d fallocate_mode 0x%x for offset 0x%lx size 0x%lx\n",
> +               restricted_fd, fallocate_mode, fd_offset, size);
> +       ret = fallocate(restricted_fd, fallocate_mode, fd_offset, size);
> +       TEST_ASSERT(ret == 0, "fallocate failed\n");
> +       enc_region.addr = gpa;
> +       enc_region.size = size;
> +       if (unback_mem) {
> +               printf("undoing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
> +               vm_ioctl(vm, KVM_MEMORY_ENCRYPT_UNREG_REGION, &enc_region);
> +       } else {
> +               printf("doing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
> +               vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &enc_region);
> +       }
> +}
> +
> +void vm_unback_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
> +{
> +       vm_update_private_mem(vm, gpa, size, true);
> +}
> +
> +void vm_allocate_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
> +{
> +       vm_update_private_mem(vm, gpa, size, false);
> +}
> +
> +static void handle_vm_exit_map_gpa_hypercall(struct kvm_vm *vm,
> +                               struct kvm_vcpu *vcpu)
> +{
> +       uint64_t gpa, npages, attrs, size;
> +
> +       TEST_ASSERT(vcpu->run->hypercall.nr == KVM_HC_MAP_GPA_RANGE,
> +               "Unhandled Hypercall %lld\n", vcpu->run->hypercall.nr);
> +       gpa = vcpu->run->hypercall.args[0];
> +       npages = vcpu->run->hypercall.args[1];
> +       size = npages << MIN_PAGE_SHIFT;
> +       attrs = vcpu->run->hypercall.args[2];
> +       pr_info("Explicit conversion off 0x%lx size 0x%lx to %s\n", gpa, size,
> +               (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED) ? "private" : "shared");
> +
> +       if (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED)
> +               vm_allocate_private_mem(vm, gpa, size);
> +       else
> +               vm_unback_private_mem(vm, gpa, size);
> +
> +       vcpu->run->hypercall.ret = 0;
> +}
> +
> +static void vcpu_work(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
> +       struct vm_setup_info *info)
> +{
> +       struct ucall uc;
> +       uint64_t cmd;
> +
> +       /*
> +        * Loop until the guest is done.
> +        */
> +
> +       while (true) {
> +               vcpu_run(vcpu);
> +
> +               if (vcpu->run->exit_reason == KVM_EXIT_IO) {
> +                       cmd = get_ucall(vcpu, &uc);
> +                       if (cmd != UCALL_SYNC)
> +                               break;
> +
> +                       TEST_ASSERT(info->ioexit_cb, "ioexit cb not present");
> +                       info->ioexit_cb(vm, uc.args[1]);
> +                       continue;
> +               }

Should this be integrated into the ucall library directly somehow?
That way users of VMs with private memory do not need special
handling?

After Sean's series:
https://lore.kernel.org/linux-arm-kernel/20220825232522.3997340-3-seanjc@google.com/
we have a common get_ucall() that this check could be integrated into?
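
Something along these lines, perhaps (rough sketch only, reusing
handle_vm_exit_map_gpa_hypercall() from this patch; the
vcpu_run_and_handle_mapgpa() name is made up):

/*
 * Handle MAP_GPA_RANGE exits inside a common run helper so that tests
 * can keep an ordinary get_ucall() loop.
 */
static void vcpu_run_and_handle_mapgpa(struct kvm_vm *vm,
				       struct kvm_vcpu *vcpu)
{
	for (;;) {
		vcpu_run(vcpu);

		if (vcpu->run->exit_reason != KVM_EXIT_HYPERCALL)
			return;

		handle_vm_exit_map_gpa_hypercall(vm, vcpu);
	}
}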

> +
> +               if (vcpu->run->exit_reason == KVM_EXIT_HYPERCALL) {
> +                       handle_vm_exit_map_gpa_hypercall(vm, vcpu);
> +                       continue;
> +               }
> +
> +               TEST_FAIL("Unhandled VCPU exit reason %d\n",
> +                       vcpu->run->exit_reason);
> +               break;
> +       }
> +
> +       if (vcpu->run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
> +               TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
> +                         __FILE__, uc.args[1], uc.args[2]);
> +}
> +
> +/*
> + * Execute a guest VM with private memslots.
> + *
> + * Input Args:
> + *   info - pointer to a structure containing information about setting up a VM
> + *     with private memslots
> + *
> + * Output Args: None
> + *
> + * Return: None
> + *
> + * Function called by host userspace logic in selftests to execute guest VM
> + * logic. It installs the test area memslot (test_area_slot), which contains
> + * the region of memory used to test private/shared memory accesses backed by
> + * a private memslot.
> + */
> +void execute_vm_with_private_test_mem(struct vm_setup_info *info)
> +{
> +       struct kvm_vm *vm;
> +       struct kvm_enable_cap cap;
> +       struct kvm_vcpu *vcpu;
> +       uint64_t test_area_gpa, test_area_size;
> +       struct test_setup_info *test_info = &info->test_info;
> +
> +       TEST_ASSERT(info->guest_fn, "guest_fn not present");
> +       vm = vm_create_with_one_vcpu(&vcpu, info->guest_fn);

I am a little confused with how this library is going to work for SEV
VMs that want to have UPM private memory eventually.

Why should users of UPM be forced to use this very specific VM
creation and vCPU run loop. In the patch
https://lore.kernel.org/lkml/20220829171021.701198-1-pgonda@google.com/T/#m033ebc32df47a172bc6c46d4398b6c4387b7934d
SEV VMs need to be created specially vm_sev_create_with_one_vcpu() but
then callers can run the VM's vCPUs like other selftests.

How do you see this working with SEV VMs?



> +
> +       vm_check_cap(vm, KVM_CAP_EXIT_HYPERCALL);
> +       cap.cap = KVM_CAP_EXIT_HYPERCALL;
> +       cap.flags = 0;
> +       cap.args[0] = (1 << KVM_HC_MAP_GPA_RANGE);
> +       vm_ioctl(vm, KVM_ENABLE_CAP, &cap);
> +
> +       TEST_ASSERT(test_info->test_area_size, "Test mem size not present");
> +
> +       test_area_size = test_info->test_area_size;
> +       test_area_gpa = test_info->test_area_gpa;
> +       vm_userspace_mem_region_add(vm, info->test_mem_src, test_area_gpa,
> +               test_info->test_area_slot, test_area_size / vm->page_size,
> +               KVM_MEM_PRIVATE);
> +       vm_allocate_private_mem(vm, test_area_gpa, test_area_size);
> +
> +       pr_info("Mapping test memory pages 0x%zx page_size 0x%x\n",
> +               test_area_size/vm->page_size, vm->page_size);
> +       virt_map(vm, test_area_gpa, test_area_gpa, test_area_size/vm->page_size);
> +
> +       vcpu_work(vm, vcpu, info);
> +
> +       kvm_vm_free(vm);
> +}
> --
> 2.38.1.431.g37b22c650d-goog
>
Vishal Annapurve Nov. 15, 2022, 1:53 a.m. UTC | #2
On Mon, Nov 14, 2022 at 11:37 AM Peter Gonda <pgonda@google.com> wrote:
>...
> > +static void handle_vm_exit_map_gpa_hypercall(struct kvm_vm *vm,
> > +                               struct kvm_vcpu *vcpu)
> > +{
> > +       uint64_t gpa, npages, attrs, size;
> > +
> > +       TEST_ASSERT(vcpu->run->hypercall.nr == KVM_HC_MAP_GPA_RANGE,
> > +               "Unhandled Hypercall %lld\n", vcpu->run->hypercall.nr);
> > +       gpa = vcpu->run->hypercall.args[0];
> > +       npages = vcpu->run->hypercall.args[1];
> > +       size = npages << MIN_PAGE_SHIFT;
> > +       attrs = vcpu->run->hypercall.args[2];
> > +       pr_info("Explicit conversion off 0x%lx size 0x%lx to %s\n", gpa, size,
> > +               (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED) ? "private" : "shared");
> > +
> > +       if (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED)
> > +               vm_allocate_private_mem(vm, gpa, size);
> > +       else
> > +               vm_unback_private_mem(vm, gpa, size);
> > +
> > +       vcpu->run->hypercall.ret = 0;
> > +}
> > +
> > +static void vcpu_work(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
> > +       struct vm_setup_info *info)
> > +{
> > +       struct ucall uc;
> > +       uint64_t cmd;
> > +
> > +       /*
> > +        * Loop until the guest is done.
> > +        */
> > +
> > +       while (true) {
> > +               vcpu_run(vcpu);
> > +
> > +               if (vcpu->run->exit_reason == KVM_EXIT_IO) {
> > +                       cmd = get_ucall(vcpu, &uc);
> > +                       if (cmd != UCALL_SYNC)
> > +                               break;
> > +
> > +                       TEST_ASSERT(info->ioexit_cb, "ioexit cb not present");
> > +                       info->ioexit_cb(vm, uc.args[1]);
> > +                       continue;
> > +               }
>
> Should this be integrated into the ucall library directly somehow?
> That way users of VMs with private memory do not need special
> handling?
>
> After Sean's series:
> https://lore.kernel.org/linux-arm-kernel/20220825232522.3997340-3-seanjc@google.com/
> we have a common get_ucall() that this check could be integrated into?
>
> > +
> > +               if (vcpu->run->exit_reason == KVM_EXIT_HYPERCALL) {
> > +                       handle_vm_exit_map_gpa_hypercall(vm, vcpu);
> > +                       continue;
> > +               }
> > +
> > +               TEST_FAIL("Unhandled VCPU exit reason %d\n",
> > +                       vcpu->run->exit_reason);
> > +               break;
> > +       }
> > +
> > +       if (vcpu->run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
> > +               TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
> > +                         __FILE__, uc.args[1], uc.args[2]);
> > +}
> > +
> > +/*
> > + * Execute a guest VM with private memslots.
> > + *
> > + * Input Args:
> > + *   info - pointer to a structure containing information about setting up a VM
> > + *     with private memslots
> > + *
> > + * Output Args: None
> > + *
> > + * Return: None
> > + *
> > + * Function called by host userspace logic in selftests to execute guest VM
> > + * logic. It installs the test area memslot (test_area_slot), which contains
> > + * the region of memory used to test private/shared memory accesses backed by
> > + * a private memslot.
> > + */
> > +void execute_vm_with_private_test_mem(struct vm_setup_info *info)
> > +{
> > +       struct kvm_vm *vm;
> > +       struct kvm_enable_cap cap;
> > +       struct kvm_vcpu *vcpu;
> > +       uint64_t test_area_gpa, test_area_size;
> > +       struct test_setup_info *test_info = &info->test_info;
> > +
> > +       TEST_ASSERT(info->guest_fn, "guest_fn not present");
> > +       vm = vm_create_with_one_vcpu(&vcpu, info->guest_fn);
>
> I am a little confused with how this library is going to work for SEV
> VMs that want to have UPM private memory eventually.
>
> Why should users of UPM be forced to use this very specific VM
> creation and vCPU run loop. In the patch
> https://lore.kernel.org/lkml/20220829171021.701198-1-pgonda@google.com/T/#m033ebc32df47a172bc6c46d4398b6c4387b7934d
> SEV VMs need to be created specially vm_sev_create_with_one_vcpu() but
> then callers can run the VM's vCPUs like other selftests.
>
> How do you see this working with SEV VMs?
>

This VM creation method is useful for running VMs whose execution may
call mapgpa to change memory attributes. A new VM creation method
specific to SEV VMs can be introduced.
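
E.g. roughly (sketch only, the vm_enable_map_gpa_exits() name is
invented; it also assumes the exit-handling loop gets exposed for
reuse):

/* Let any VM, however it was created, opt in to MAP_GPA_RANGE exits. */
void vm_enable_map_gpa_exits(struct kvm_vm *vm)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_EXIT_HYPERCALL,
		.args[0] = (1 << KVM_HC_MAP_GPA_RANGE),
	};

	vm_check_cap(vm, KVM_CAP_EXIT_HYPERCALL);
	vm_ioctl(vm, KVM_ENABLE_CAP, &cap);
}

An SEV test could then call vm_sev_create_with_one_vcpu(), enable the
exits as above, and drive the same exit-handling loop.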

I tried to reuse this framework earlier for SEV VM selftests via:
1) https://lore.kernel.org/lkml/20220830224259.412342-8-vannapurve@google.com/T/#m8164d3111c9a17ebab77f01635df8930207cc65d
2) https://lore.kernel.org/lkml/20220830224259.412342-8-vannapurve@google.com/T/#m8164d3111c9a17ebab77f01635df8930207cc65d

These changes will need to be refreshed on top of this updated series, though.
Vishal Annapurve Dec. 8, 2022, 9:56 p.m. UTC | #3
On Mon, Nov 14, 2022 at 5:53 PM Vishal Annapurve <vannapurve@google.com> wrote:
>
> On Mon, Nov 14, 2022 at 11:37 AM Peter Gonda <pgonda@google.com> wrote:
> >...
> > > +static void handle_vm_exit_map_gpa_hypercall(struct kvm_vm *vm,
> > > +                               struct kvm_vcpu *vcpu)
> > > +{
> > > +       uint64_t gpa, npages, attrs, size;
> > > +
> > > +       TEST_ASSERT(vcpu->run->hypercall.nr == KVM_HC_MAP_GPA_RANGE,
> > > +               "Unhandled Hypercall %lld\n", vcpu->run->hypercall.nr);
> > > +       gpa = vcpu->run->hypercall.args[0];
> > > +       npages = vcpu->run->hypercall.args[1];
> > > +       size = npages << MIN_PAGE_SHIFT;
> > > +       attrs = vcpu->run->hypercall.args[2];
> > > +       pr_info("Explicit conversion off 0x%lx size 0x%lx to %s\n", gpa, size,
> > > +               (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED) ? "private" : "shared");
> > > +
> > > +       if (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED)
> > > +               vm_allocate_private_mem(vm, gpa, size);
> > > +       else
> > > +               vm_unback_private_mem(vm, gpa, size);
> > > +
> > > +       vcpu->run->hypercall.ret = 0;
> > > +}
> > > +
> > > +static void vcpu_work(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
> > > +       struct vm_setup_info *info)
> > > +{
> > > +       struct ucall uc;
> > > +       uint64_t cmd;
> > > +
> > > +       /*
> > > +        * Loop until the guest is done.
> > > +        */
> > > +
> > > +       while (true) {
> > > +               vcpu_run(vcpu);
> > > +
> > > +               if (vcpu->run->exit_reason == KVM_EXIT_IO) {
> > > +                       cmd = get_ucall(vcpu, &uc);
> > > +                       if (cmd != UCALL_SYNC)
> > > +                               break;
> > > +
> > > +                       TEST_ASSERT(info->ioexit_cb, "ioexit cb not present");
> > > +                       info->ioexit_cb(vm, uc.args[1]);
> > > +                       continue;
> > > +               }
> >
> > Should this be integrated into the ucall library directly somehow?
> > That way users of VMs with private memory do not need special
> > handling?
> >
> > After Sean's series:
> > https://lore.kernel.org/linux-arm-kernel/20220825232522.3997340-3-seanjc@google.com/
> > we have a common get_ucall() that this check could be integrated into?
> >

The new patchset posted at [1] modifies the APIs to give the actual
selftest implementation more control.

[1] https://lore.kernel.org/lkml/20221205232341.4131240-5-vannapurve@google.com/T/

> > > +
> > > +               if (vcpu->run->exit_reason == KVM_EXIT_HYPERCALL) {
> > > +                       handle_vm_exit_map_gpa_hypercall(vm, vcpu);
> > > +                       continue;
> > > +               }
> > > +
> > > +               TEST_FAIL("Unhandled VCPU exit reason %d\n",
> > > +                       vcpu->run->exit_reason);
> > > +               break;
> > > +       }
> > > +
> > > +       if (vcpu->run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
> > > +               TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
> > > +                         __FILE__, uc.args[1], uc.args[2]);
> > > +}
> > > +
> > > +/*
> > > + * Execute a guest VM with private memslots.
> > > + *
> > > + * Input Args:
> > > + *   info - pointer to a structure containing information about setting up a VM
> > > + *     with private memslots
> > > + *
> > > + * Output Args: None
> > > + *
> > > + * Return: None
> > > + *
> > > + * Function called by host userspace logic in selftests to execute guest VM
> > > + * logic. It installs the test area memslot (test_area_slot), which contains
> > > + * the region of memory used to test private/shared memory accesses backed by
> > > + * a private memslot.
> > > + */
> > > +void execute_vm_with_private_test_mem(struct vm_setup_info *info)
> > > +{
> > > +       struct kvm_vm *vm;
> > > +       struct kvm_enable_cap cap;
> > > +       struct kvm_vcpu *vcpu;
> > > +       uint64_t test_area_gpa, test_area_size;
> > > +       struct test_setup_info *test_info = &info->test_info;
> > > +
> > > +       TEST_ASSERT(info->guest_fn, "guest_fn not present");
> > > +       vm = vm_create_with_one_vcpu(&vcpu, info->guest_fn);
> >
> > I am a little confused with how this library is going to work for SEV
> > VMs that want to have UPM private memory eventually.
> >
> > Why should users of UPM be forced to use this very specific VM
> > creation and vCPU run loop. In the patch
> > https://lore.kernel.org/lkml/20220829171021.701198-1-pgonda@google.com/T/#m033ebc32df47a172bc6c46d4398b6c4387b7934d
> > SEV VMs need to be created specially vm_sev_create_with_one_vcpu() but
> > then callers can run the VM's vCPUs like other selftests.
> >
> > How do you see this working with SEV VMs?
> >
>
> This VM creation method is useful for running VMs whose execution may
> call mapgpa to change memory attributes. A new VM creation method
> specific to SEV VMs can be introduced.
>
> I tried to reuse this framework earlier for SEV VM selftests via:
> 1) https://lore.kernel.org/lkml/20220830224259.412342-8-vannapurve@google.com/T/#m8164d3111c9a17ebab77f01635df8930207cc65d
> 2) https://lore.kernel.org/lkml/20220830224259.412342-8-vannapurve@google.com/T/#m8164d3111c9a17ebab77f01635df8930207cc65d
>
> These changes will need to be refreshed on top of this updated series, though.

Patch

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 0172eb6cb6ee..57385ad58527 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -53,6 +53,7 @@  LIBKVM_STRING += lib/string_override.c
 LIBKVM_x86_64 += lib/x86_64/apic.c
 LIBKVM_x86_64 += lib/x86_64/handlers.S
 LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
+LIBKVM_x86_64 += lib/x86_64/private_mem.c
 LIBKVM_x86_64 += lib/x86_64/processor.c
 LIBKVM_x86_64 += lib/x86_64/svm.c
 LIBKVM_x86_64 += lib/x86_64/ucall.c
diff --git a/tools/testing/selftests/kvm/include/x86_64/private_mem.h b/tools/testing/selftests/kvm/include/x86_64/private_mem.h
new file mode 100644
index 000000000000..e556ded971fd
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/private_mem.h
@@ -0,0 +1,37 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022, Google LLC.
+ */
+
+#ifndef SELFTEST_KVM_PRIVATE_MEM_H
+#define SELFTEST_KVM_PRIVATE_MEM_H
+
+#include <stdint.h>
+#include <kvm_util.h>
+
+void kvm_hypercall_map_shared(uint64_t gpa, uint64_t size);
+void kvm_hypercall_map_private(uint64_t gpa, uint64_t size);
+
+void vm_unback_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size);
+
+void vm_allocate_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size);
+
+typedef void (*guest_code_fn)(void);
+typedef void (*io_exit_handler)(struct kvm_vm *vm, uint32_t uc_arg1);
+
+struct test_setup_info {
+	uint64_t test_area_gpa;
+	uint64_t test_area_size;
+	uint32_t test_area_slot;
+};
+
+struct vm_setup_info {
+	enum vm_mem_backing_src_type test_mem_src;
+	struct test_setup_info test_info;
+	guest_code_fn guest_fn;
+	io_exit_handler ioexit_cb;
+};
+
+void execute_vm_with_private_test_mem(struct vm_setup_info *info);
+
+#endif /* SELFTEST_KVM_PRIVATE_MEM_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/private_mem.c b/tools/testing/selftests/kvm/lib/x86_64/private_mem.c
new file mode 100644
index 000000000000..3076cae81804
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/private_mem.c
@@ -0,0 +1,211 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * tools/testing/selftests/kvm/lib/x86_64/private_mem.c
+ *
+ * Copyright (C) 2022, Google LLC.
+ */
+#define _GNU_SOURCE /* for program_invocation_name */
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/kvm_para.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <private_mem.h>
+#include <processor.h>
+
+static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
+	uint64_t flags)
+{
+	return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
+}
+
+static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
+	uint64_t flags)
+{
+	uint64_t ret;
+
+	GUEST_ASSERT_2(IS_PAGE_ALIGNED(gpa) && IS_PAGE_ALIGNED(size), gpa, size);
+
+	ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
+	GUEST_ASSERT_1(!ret, ret);
+}
+
+void kvm_hypercall_map_shared(uint64_t gpa, uint64_t size)
+{
+	kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_DECRYPTED);
+}
+
+void kvm_hypercall_map_private(uint64_t gpa, uint64_t size)
+{
+	kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_ENCRYPTED);
+}
+
+static void vm_update_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
+	bool unback_mem)
+{
+	int restricted_fd;
+	uint64_t restricted_fd_offset, guest_phys_base, fd_offset;
+	struct kvm_enc_region enc_region;
+	struct kvm_userspace_memory_region_ext *region_ext;
+	struct kvm_userspace_memory_region *region;
+	int fallocate_mode = 0;
+	int ret;
+
+	region_ext = kvm_userspace_memory_region_ext_find(vm, gpa, gpa + size);
+	TEST_ASSERT(region_ext != NULL, "Region not found");
+	region = &region_ext->region;
+	TEST_ASSERT(region->flags & KVM_MEM_PRIVATE,
+		"Can not update private memfd for non-private memslot\n");
+	restricted_fd = region_ext->restricted_fd;
+	restricted_fd_offset = region_ext->restricted_offset;
+	guest_phys_base = region->guest_phys_addr;
+	fd_offset = restricted_fd_offset + (gpa - guest_phys_base);
+
+	if (unback_mem)
+		fallocate_mode = (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
+
+	printf("restricted_fd %d fallocate_mode 0x%x for offset 0x%lx size 0x%lx\n",
+		restricted_fd, fallocate_mode, fd_offset, size);
+	ret = fallocate(restricted_fd, fallocate_mode, fd_offset, size);
+	TEST_ASSERT(ret == 0, "fallocate failed\n");
+	enc_region.addr = gpa;
+	enc_region.size = size;
+	if (unback_mem) {
+		printf("undoing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
+		vm_ioctl(vm, KVM_MEMORY_ENCRYPT_UNREG_REGION, &enc_region);
+	} else {
+		printf("doing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
+		vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &enc_region);
+	}
+}
+
+void vm_unback_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
+{
+	vm_update_private_mem(vm, gpa, size, true);
+}
+
+void vm_allocate_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
+{
+	vm_update_private_mem(vm, gpa, size, false);
+}
+
+static void handle_vm_exit_map_gpa_hypercall(struct kvm_vm *vm,
+				struct kvm_vcpu *vcpu)
+{
+	uint64_t gpa, npages, attrs, size;
+
+	TEST_ASSERT(vcpu->run->hypercall.nr == KVM_HC_MAP_GPA_RANGE,
+		"Unhandled Hypercall %lld\n", vcpu->run->hypercall.nr);
+	gpa = vcpu->run->hypercall.args[0];
+	npages = vcpu->run->hypercall.args[1];
+	size = npages << MIN_PAGE_SHIFT;
+	attrs = vcpu->run->hypercall.args[2];
+	pr_info("Explicit conversion off 0x%lx size 0x%lx to %s\n", gpa, size,
+		(attrs & KVM_MAP_GPA_RANGE_ENCRYPTED) ? "private" : "shared");
+
+	if (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED)
+		vm_allocate_private_mem(vm, gpa, size);
+	else
+		vm_unback_private_mem(vm, gpa, size);
+
+	vcpu->run->hypercall.ret = 0;
+}
+
+static void vcpu_work(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+	struct vm_setup_info *info)
+{
+	struct ucall uc;
+	uint64_t cmd;
+
+	/*
+	 * Loop until the guest is done.
+	 */
+
+	while (true) {
+		vcpu_run(vcpu);
+
+		if (vcpu->run->exit_reason == KVM_EXIT_IO) {
+			cmd = get_ucall(vcpu, &uc);
+			if (cmd != UCALL_SYNC)
+				break;
+
+			TEST_ASSERT(info->ioexit_cb, "ioexit cb not present");
+			info->ioexit_cb(vm, uc.args[1]);
+			continue;
+		}
+
+		if (vcpu->run->exit_reason == KVM_EXIT_HYPERCALL) {
+			handle_vm_exit_map_gpa_hypercall(vm, vcpu);
+			continue;
+		}
+
+		TEST_FAIL("Unhandled VCPU exit reason %d\n",
+			vcpu->run->exit_reason);
+		break;
+	}
+
+	if (vcpu->run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
+		TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
+			  __FILE__, uc.args[1], uc.args[2]);
+}
+
+/*
+ * Execute a guest VM with private memslots.
+ *
+ * Input Args:
+ *   info - pointer to a structure containing information about setting up a VM
+ *     with private memslots
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Function called by host userspace logic in selftests to execute guest VM
+ * logic. It installs the test area memslot (test_area_slot), which contains
+ * the region of memory used to test private/shared memory accesses backed by
+ * a private memslot.
+ */
+void execute_vm_with_private_test_mem(struct vm_setup_info *info)
+{
+	struct kvm_vm *vm;
+	struct kvm_enable_cap cap;
+	struct kvm_vcpu *vcpu;
+	uint64_t test_area_gpa, test_area_size;
+	struct test_setup_info *test_info = &info->test_info;
+
+	TEST_ASSERT(info->guest_fn, "guest_fn not present");
+	vm = vm_create_with_one_vcpu(&vcpu, info->guest_fn);
+
+	vm_check_cap(vm, KVM_CAP_EXIT_HYPERCALL);
+	cap.cap = KVM_CAP_EXIT_HYPERCALL;
+	cap.flags = 0;
+	cap.args[0] = (1 << KVM_HC_MAP_GPA_RANGE);
+	vm_ioctl(vm, KVM_ENABLE_CAP, &cap);
+
+	TEST_ASSERT(test_info->test_area_size, "Test mem size not present");
+
+	test_area_size = test_info->test_area_size;
+	test_area_gpa = test_info->test_area_gpa;
+	vm_userspace_mem_region_add(vm, info->test_mem_src, test_area_gpa,
+		test_info->test_area_slot, test_area_size / vm->page_size,
+		KVM_MEM_PRIVATE);
+	vm_allocate_private_mem(vm, test_area_gpa, test_area_size);
+
+	pr_info("Mapping test memory pages 0x%zx page_size 0x%x\n",
+		test_area_size/vm->page_size, vm->page_size);
+	virt_map(vm, test_area_gpa, test_area_gpa, test_area_size/vm->page_size);
+
+	vcpu_work(vm, vcpu, info);
+
+	kvm_vm_free(vm);
+}