| Message ID | 20210902181751.252227-4-pgonda@google.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Add AMD SEV and SEV-ES intra host migration support |
On Thu, Sep 02, 2021, Peter Gonda wrote:
> +/*
> + * Open SEV_DEV_PATH if available, otherwise exit the entire program.
> + *
> + * Input Args:
> + *   flags - The flags to pass when opening SEV_DEV_PATH.
> + *
> + * Return:
> + *   The opened file descriptor of /dev/sev.
> + */
> +static int open_sev_dev_path_or_exit(int flags)
> +{
> +	static int fd;
> +
> +	if (fd != 0)
> +		return fd;

Caching the file here is unnecessary, it's used in exactly one function.

> +	fd = open(SEV_DEV_PATH, flags);
> +	if (fd < 0) {
> +		print_skip("%s not available, is SEV not enabled? (errno: %d)",
> +			   SEV_DEV_PATH, errno);
> +		exit(KSFT_SKIP);
> +	}
> +
> +	return fd;
> +}

Rather than copy-paste _open_kvm_dev_path_or_exit(), it's probably worth factoring
out a helper in a separate patch, e.g.

diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 10a8ed691c66..06a6c04010fb 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -31,6 +31,19 @@ static void *align(void *x, size_t size)
 	return (void *) (((size_t) x + mask) & ~mask);
 }

+int open_path_or_exit(const char *path, int flags)
+{
+	int fd;
+
+	fd = open(path, flags);
+	if (fd < 0) {
+		print_skip("%s not available (errno: %d)", path, errno);
+		exit(KSFT_SKIP);
+	}
+
+	return fd;
+}
+
 /*
  * Open KVM_DEV_PATH if available, otherwise exit the entire program.
  *
@@ -42,16 +55,7 @@ static void *align(void *x, size_t size)
  */
 static int _open_kvm_dev_path_or_exit(int flags)
 {
-	int fd;
-
-	fd = open(KVM_DEV_PATH, flags);
-	if (fd < 0) {
-		print_skip("%s not available, is KVM loaded? (errno: %d)",
-			   KVM_DEV_PATH, errno);
-		exit(KSFT_SKIP);
-	}
-
-	return fd;
+	return open_path_or_exit(KVM_DEV_PATH, flags);
 }

 int open_kvm_dev_path_or_exit(void)

> +
> +static void sev_ioctl(int vm_fd, int cmd_id, void *data)
> +{
> +	struct kvm_sev_cmd cmd = {
> +		.id = cmd_id,
> +		.data = (uint64_t)data,
> +		.sev_fd = open_sev_dev_path_or_exit(0),
> +	};
> +	int ret;
> +
> +	TEST_ASSERT(cmd_id < KVM_SEV_NR_MAX && cmd_id >= 0,
> +		    "Unknown SEV CMD : %d\n", cmd_id);

LOL, I like sanity checks, but asserting that the test itself isn't horrendously
broken is a bit much.  And if someone manages to screw up that badly, the ioctl()
below will fail.

> +	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
> +	TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
> +		    "%d failed: return code: %d, errno: %d, fw error: %d",
> +		    cmd_id, ret, errno, cmd.error);
> +}
> +
> +static struct kvm_vm *sev_vm_create(bool es)
> +{
> +	struct kvm_vm *vm;
> +	struct kvm_sev_launch_start start = { 0 };
> +	int i;

Rather than cache /dev/sev in a helper, you can do:

	int sev_fd = open_path_or_exit(SEV_DEV_PATH, 0);

	sev_ioctl(vm, sev_fd, ...);

> +	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
> +	sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
> +	for (i = 0; i < MIGRATE_TEST_NUM_VCPUS; ++i)
> +		vm_vcpu_add(vm, i);
> +	start.policy |= (es) << 2;

I had to go spelunking to confirm this is the "ES" policy, please do:

	if (es)
		start.policy |= SEV_POLICY_ES;

> +	sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
> +	if (es)
> +		sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

And with sev_fd scoped to this function:

	close(sev_fd);

which I think is legal?

> +	return vm;
> +}
> +
> +static void test_sev_migrate_from(bool es)
> +{
> +	struct kvm_vm *vms[MIGRATE_TEST_VMS];

Prefix this and LOCK_TESTING_THREADS with NR_ so that it's clear these are arbitrary
numbers of things.  And I guess s/MIGRATE_TEST_NUM_VCPUS/NR_MIGRATE_TEST_VCPUS to
be consistent.

> +	struct kvm_enable_cap cap = {
> +		.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM
> +	};
> +	int i;
> +
> +	for (i = 0; i < MIGRATE_TEST_VMS; ++i) {
> +		vms[i] = sev_vm_create(es);

It doesn't really matter, but closing these fds tests that KVM doesn't explode
when VMs are destroyed without the process exiting.

> +		if (i > 0) {
> +			cap.args[0] = vms[i - 1]->fd;
> +			vm_enable_cap(vms[i], &cap);
> +		}
> +	}

For giggles, we can also test migrating back (with some feedback from below
mixed in):

	/* Initial migration from the src to the first dst. */
	sev_migrate_from(dst_vms[0]->fd, src_vm->fd);

	for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
		sev_migrate_from(vms[i]->fd, vms[i - 1]->fd);

	/* Migrate the guest back to the original VM. */
	sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);

> +}
> +
> +struct locking_thread_input {
> +	struct kvm_vm *vm;
> +	int source_fds[LOCK_TESTING_THREADS];
> +};
> +
> +static void *locking_test_thread(void *arg)
> +{
> +	/*
> +	 * This test case runs a number of threads all trying to use the intra
> +	 * host migration ioctls. This tries to detect if a deadlock exists.
> +	 */
> +	struct kvm_enable_cap cap = {
> +		.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM
> +	};
> +	int i, j;
> +	struct locking_thread_input *input = (struct locking_test_thread *)arg;
> +
> +	for (i = 0; i < LOCK_TESTING_ITERATIONS; ++i) {
> +		j = input->source_fds[i % LOCK_TESTING_THREADS];
> +		cap.args[0] = input->source_fds[j];

This looks wrong, it's indexing source_fds with a value from source_fds.  Did
you intend?

		j = i % LOCK_TESTING_THREADS;
		cap.args[0] = input->source_fds[j];

> +		/*
> +		 * Call IOCTL directly without checking return code or
> +		 * asserting. We are * simply trying to confirm there is no
> +		 * deadlock from userspace * not check correctness of
> +		 * migration here.
> +		 */
> +		ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);

For readability and future extensibility, I'd say create a single helper and use
it even in the happy case, e.g.

static int __sev_migrate_from(int dst_fd, int src_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM,
		.args = { src_fd }	// No idea if this is correct syntax
	};

	return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
}

static void sev_migrate_from(...)
{
	ret = __sev_migrate_from(...);
	TEST_ASSERT(!ret, "Migration failed, blah blah blah");
}

> +	}
> +}
> +
> +static void test_sev_migrate_locking(void)
> +{
> +	struct locking_thread_input input[LOCK_TESTING_THREADS];
> +	pthread_t pt[LOCK_TESTING_THREADS];
> +	int i;
> +
> +	for (i = 0; i < LOCK_TESTING_THREADS; ++i) {

With a bit of refactoring, the same VMs from the happy case can be reused for
the locking test, and we can also get concurrent SEV+SEV-ES migration (see below).

> +		input[i].vm = sev_vm_create(/* es= */ false);
> +		input[0].source_fds[i] = input[i].vm->fd;
> +	}
> +	for (i = 1; i < LOCK_TESTING_THREADS; ++i)
> +		memcpy(input[i].source_fds, input[0].source_fds,
> +		       sizeof(input[i].source_fds));
> +
> +	for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> +		pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
> +
> +	for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> +		pthread_join(pt[i], NULL);
> +}
> +
> +int main(int argc, char *argv[])
> +{
> +	test_sev_migrate_from(/* es= */ false);
> +	test_sev_migrate_from(/* es= */ true);
> +	test_sev_migrate_locking();

With a little refactoring, this can add other tests, e.g. illegal dst.  Assuming
KVM requires the dst to be !SEV, SEV and SEV-ES can use the same set of destination
VMs.  And the locking test can take 'em all.  E.g. something like:

	struct kvm_vm *sev_vm, *sev_es_vm;

	sev_vm = sev_vm_create(false);
	sev_es_vm = sev_vm_create(true);

	for (i = 0; i < NR_MIGRATE_TEST_VMS; i++)
		dst_vms[i] = sev_dst_vm_create();

	test_sev_migrate_from(sev_vms, dst_vms);
	test_sev_migrate_from(sev_es_vms, dst_vms);

	ret = __sev_migrate_from(sev_es_vms[0], sev_vms[0]);
	TEST_ASSERT(ret == -EINVAL, ...);

	ret = __sev_migrate_from(sev_vms[0], sev_es_vms[0]);
	TEST_ASSERT(ret == -EINVAL, ...);

	ret = __sev_migrate_from(dst_vms[0], dst_vms[1]);
	TEST_ASSERT(ret == -EINVAL, ....);

	test_sev_migrate_locking(sev_vm, sev_es_vm, dst_vms);

> +	return 0;
> +}
> --
> 2.33.0.153.gba50c8fa24-goog
>
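For reference, a minimal sketch of the sev_dst_vm_create() helper referenced in the outline above. The name comes from Sean's example; the body is an assumption (a plain, non-SEV VM with the same vCPU count, built with the selftest APIs already used by this patch), not code from the series. NR_MIGRATE_TEST_VCPUS is the renamed MIGRATE_TEST_NUM_VCPUS suggested earlier.

static struct kvm_vm *sev_dst_vm_create(void)
{
	struct kvm_vm *dst_vm;
	int i;

	/* A migration destination is just an ordinary VM with vCPUs... */
	dst_vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
		vm_vcpu_add(dst_vm, i);

	/*
	 * ...with no KVM_SEV_INIT/KVM_SEV_LAUNCH_START, since the destination
	 * must not already have an encryption context.
	 */
	return dst_vm;
}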
On Fri, Sep 10, 2021 at 11:16 AM Sean Christopherson <seanjc@google.com> wrote:
>
> On Thu, Sep 02, 2021, Peter Gonda wrote:
> > +/*
> > + * Open SEV_DEV_PATH if available, otherwise exit the entire program.
> > + *
> > + * Input Args:
> > + *   flags - The flags to pass when opening SEV_DEV_PATH.
> > + *
> > + * Return:
> > + *   The opened file descriptor of /dev/sev.
> > + */
> > +static int open_sev_dev_path_or_exit(int flags)
> > +{
> > +	static int fd;
> > +
> > +	if (fd != 0)
> > +		return fd;
>
> Caching the file here is unnecessary, it's used in exactly one function.
>
> > +	fd = open(SEV_DEV_PATH, flags);
> > +	if (fd < 0) {
> > +		print_skip("%s not available, is SEV not enabled? (errno: %d)",
> > +			   SEV_DEV_PATH, errno);
> > +		exit(KSFT_SKIP);
> > +	}
> > +
> > +	return fd;
> > +}
>
> Rather than copy-paste _open_kvm_dev_path_or_exit(), it's probably worth factoring
> out a helper in a separate patch, e.g.

So the suggestion would be to move open_sev_dev_path_or_exit into
tools/testing/selftests/kvm/include/x86_64/svm_util.h.

If so, wouldn't it make sense to keep the caching of the FD?

>
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index 10a8ed691c66..06a6c04010fb 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -31,6 +31,19 @@ static void *align(void *x, size_t size)
>  	return (void *) (((size_t) x + mask) & ~mask);
>  }
>
> +int open_path_or_exit(const char *path, int flags)
> +{
> +	int fd;
> +
> +	fd = open(path, flags);
> +	if (fd < 0) {
> +		print_skip("%s not available (errno: %d)", path, errno);
> +		exit(KSFT_SKIP);
> +	}
> +
> +	return fd;
> +}
> +
>  /*
>   * Open KVM_DEV_PATH if available, otherwise exit the entire program.
>   *
> @@ -42,16 +55,7 @@ static void *align(void *x, size_t size)
>   */
>  static int _open_kvm_dev_path_or_exit(int flags)
>  {
> -	int fd;
> -
> -	fd = open(KVM_DEV_PATH, flags);
> -	if (fd < 0) {
> -		print_skip("%s not available, is KVM loaded? (errno: %d)",
> -			   KVM_DEV_PATH, errno);
> -		exit(KSFT_SKIP);
> -	}
> -
> -	return fd;
> +	return open_path_or_exit(KVM_DEV_PATH, flags);
>  }
>
>  int open_kvm_dev_path_or_exit(void)
>
> > +
> > +static void sev_ioctl(int vm_fd, int cmd_id, void *data)
> > +{
> > +	struct kvm_sev_cmd cmd = {
> > +		.id = cmd_id,
> > +		.data = (uint64_t)data,
> > +		.sev_fd = open_sev_dev_path_or_exit(0),
> > +	};
> > +	int ret;
> > +
> > +	TEST_ASSERT(cmd_id < KVM_SEV_NR_MAX && cmd_id >= 0,
> > +		    "Unknown SEV CMD : %d\n", cmd_id);
>
> LOL, I like sanity checks, but asserting that the test itself isn't horrendously
> broken is a bit much.  And if someone manages to screw up that badly, the ioctl()
> below will fail.

Ack. I'll remove this.

>
> > +	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
> > +	TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
> > +		    "%d failed: return code: %d, errno: %d, fw error: %d",
> > +		    cmd_id, ret, errno, cmd.error);
> > +}
> > +
> > +static struct kvm_vm *sev_vm_create(bool es)
> > +{
> > +	struct kvm_vm *vm;
> > +	struct kvm_sev_launch_start start = { 0 };
> > +	int i;
>
> Rather than cache /dev/sev in a helper, you can do:
>
> 	int sev_fd = open_path_or_exit(SEV_DEV_PATH, 0);
>
> 	sev_ioctl(vm, sev_fd, ...);
>
> > +	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
> > +	sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
> > +	for (i = 0; i < MIGRATE_TEST_NUM_VCPUS; ++i)
> > +		vm_vcpu_add(vm, i);
> > +	start.policy |= (es) << 2;
>
> I had to go spelunking to confirm this is the "ES" policy, please do:
>
> 	if (es)
> 		start.policy |= SEV_POLICY_ES;
>
> > +	sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
> > +	if (es)
> > +		sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
>
> And with sev_fd scoped to this function:
>
> 	close(sev_fd);
>
> which I think is legal?
>
> > +	return vm;
> > +}
> > +
> > +static void test_sev_migrate_from(bool es)
> > +{
> > +	struct kvm_vm *vms[MIGRATE_TEST_VMS];
>
> Prefix this and LOCK_TESTING_THREADS with NR_ so that it's clear these are arbitrary
> numbers of things.  And I guess s/MIGRATE_TEST_NUM_VCPUS/NR_MIGRATE_TEST_VCPUS to
> be consistent.
>
> > +	struct kvm_enable_cap cap = {
> > +		.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM
> > +	};
> > +	int i;
> > +
> > +	for (i = 0; i < MIGRATE_TEST_VMS; ++i) {
> > +		vms[i] = sev_vm_create(es);
>
> It doesn't really matter, but closing these fds tests that KVM doesn't explode
> when VMs are destroyed without the process exiting.
>

Can do, I spot checked a couple other tests and didn't see any close calls so
didn't clutter the test here.

> > +		if (i > 0) {
> > +			cap.args[0] = vms[i - 1]->fd;
> > +			vm_enable_cap(vms[i], &cap);
> > +		}
> > +	}
>
> For giggles, we can also test migrating back (with some feedback from below
> mixed in):
>
> 	/* Initial migration from the src to the first dst. */
> 	sev_migrate_from(dst_vms[0]->fd, src_vm->fd);
>
> 	for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
> 		sev_migrate_from(vms[i]->fd, vms[i - 1]->fd);
>
> 	/* Migrate the guest back to the original VM. */
> 	sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
>
> > +}
> > +
> > +struct locking_thread_input {
> > +	struct kvm_vm *vm;
> > +	int source_fds[LOCK_TESTING_THREADS];
> > +};
> > +
> > +static void *locking_test_thread(void *arg)
> > +{
> > +	/*
> > +	 * This test case runs a number of threads all trying to use the intra
> > +	 * host migration ioctls. This tries to detect if a deadlock exists.
> > +	 */
> > +	struct kvm_enable_cap cap = {
> > +		.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM
> > +	};
> > +	int i, j;
> > +	struct locking_thread_input *input = (struct locking_test_thread *)arg;
> > +
> > +	for (i = 0; i < LOCK_TESTING_ITERATIONS; ++i) {
> > +		j = input->source_fds[i % LOCK_TESTING_THREADS];
> > +		cap.args[0] = input->source_fds[j];
>
> This looks wrong, it's indexing source_fds with a value from source_fds.  Did
> you intend?
>
> 		j = i % LOCK_TESTING_THREADS;
> 		cap.args[0] = input->source_fds[j];
>

Yup that's wrong I'll update.

> > +		/*
> > +		 * Call IOCTL directly without checking return code or
> > +		 * asserting. We are * simply trying to confirm there is no
> > +		 * deadlock from userspace * not check correctness of
> > +		 * migration here.
> > +		 */
> > +		ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);
>
> For readability and future extensibility, I'd say create a single helper and use
> it even in the happy case, e.g.
>
> static int __sev_migrate_from(int dst_fd, int src_fd)
> {
> 	struct kvm_enable_cap cap = {
> 		.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM,
> 		.args = { src_fd }	// No idea if this is correct syntax
> 	};
>
> 	return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
> }
>
> static void sev_migrate_from(...)
> {
> 	ret = __sev_migrate_from(...);
> 	TEST_ASSERT(!ret, "Migration failed, blah blah blah");
> }
>
> > +	}
> > +}
> > +
> > +static void test_sev_migrate_locking(void)
> > +{
> > +	struct locking_thread_input input[LOCK_TESTING_THREADS];
> > +	pthread_t pt[LOCK_TESTING_THREADS];
> > +	int i;
> > +
> > +	for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
>
> With a bit of refactoring, the same VMs from the happy case can be reused for
> the locking test, and we can also get concurrent SEV+SEV-ES migration (see below).
>
> > +		input[i].vm = sev_vm_create(/* es= */ false);
> > +		input[0].source_fds[i] = input[i].vm->fd;
> > +	}
> > +	for (i = 1; i < LOCK_TESTING_THREADS; ++i)
> > +		memcpy(input[i].source_fds, input[0].source_fds,
> > +		       sizeof(input[i].source_fds));
> > +
> > +	for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> > +		pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
> > +
> > +	for (i = 0; i < LOCK_TESTING_THREADS; ++i)
> > +		pthread_join(pt[i], NULL);
> > +}
> > +
> > +int main(int argc, char *argv[])
> > +{
> > +	test_sev_migrate_from(/* es= */ false);
> > +	test_sev_migrate_from(/* es= */ true);
> > +	test_sev_migrate_locking();
>
> With a little refactoring, this can add other tests, e.g. illegal dst.  Assuming
> KVM requires the dst to be !SEV, SEV and SEV-ES can use the same set of destination
> VMs.  And the locking test can take 'em all.  E.g. something like:
>
> 	struct kvm_vm *sev_vm, *sev_es_vm;
>
> 	sev_vm = sev_vm_create(false);
> 	sev_es_vm = sev_vm_create(true);
>
> 	for (i = 0; i < NR_MIGRATE_TEST_VMS; i++)
> 		dst_vms[i] = sev_dst_vm_create();
>
> 	test_sev_migrate_from(sev_vms, dst_vms);
> 	test_sev_migrate_from(sev_es_vms, dst_vms);
>
> 	ret = __sev_migrate_from(sev_es_vms[0], sev_vms[0]);
> 	TEST_ASSERT(ret == -EINVAL, ...);
>
> 	ret = __sev_migrate_from(sev_vms[0], sev_es_vms[0]);
> 	TEST_ASSERT(ret == -EINVAL, ...);
>
> 	ret = __sev_migrate_from(dst_vms[0], dst_vms[1]);
> 	TEST_ASSERT(ret == -EINVAL, ....);
>
> 	test_sev_migrate_locking(sev_vm, sev_es_vm, dst_vms);
>

Ack. I'll add these parameter validation tests.

> > +	return 0;
> > +}
> > --
> > 2.33.0.153.gba50c8fa24-goog
> >
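One way to reconcile the two points above, keeping the FD cache Peter asks about while still delegating to the shared open_path_or_exit() helper Sean proposes, is a thin wrapper along the following lines. This is a sketch only; where it would live (e.g. svm_util) and whether the cache is worth keeping at all are still open questions in the thread.

static int open_sev_dev_path_or_exit(void)
{
	static int fd;

	/* fd == 0 doubles as "not yet opened"; stdin is assumed to occupy fd 0. */
	if (fd == 0)
		fd = open_path_or_exit(SEV_DEV_PATH, 0);

	return fd;
}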
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c103873531e0..44fd3566fb51 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -72,6 +72,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
+TEST_GEN_PROGS_x86_64 += x86_64/sev_vm_tests
 TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
diff --git a/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
new file mode 100644
index 000000000000..8ce8dd63ca85
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kvm.h>
+#include <linux/psp-sev.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <pthread.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "kselftest.h"
+#include "../lib/kvm_util_internal.h"
+
+#define SEV_DEV_PATH "/dev/sev"
+
+#define MIGRATE_TEST_NUM_VCPUS 4
+#define MIGRATE_TEST_VMS 3
+#define LOCK_TESTING_THREADS 3
+#define LOCK_TESTING_ITERATIONS 10000
+
+/*
+ * Open SEV_DEV_PATH if available, otherwise exit the entire program.
+ *
+ * Input Args:
+ *   flags - The flags to pass when opening SEV_DEV_PATH.
+ *
+ * Return:
+ *   The opened file descriptor of /dev/sev.
+ */
+static int open_sev_dev_path_or_exit(int flags)
+{
+	static int fd;
+
+	if (fd != 0)
+		return fd;
+
+	fd = open(SEV_DEV_PATH, flags);
+	if (fd < 0) {
+		print_skip("%s not available, is SEV not enabled? (errno: %d)",
+			   SEV_DEV_PATH, errno);
+		exit(KSFT_SKIP);
+	}
+
+	return fd;
+}
+
+static void sev_ioctl(int vm_fd, int cmd_id, void *data)
+{
+	struct kvm_sev_cmd cmd = {
+		.id = cmd_id,
+		.data = (uint64_t)data,
+		.sev_fd = open_sev_dev_path_or_exit(0),
+	};
+	int ret;
+
+	TEST_ASSERT(cmd_id < KVM_SEV_NR_MAX && cmd_id >= 0,
+		    "Unknown SEV CMD : %d\n", cmd_id);
+
+	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
+	TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
+		    "%d failed: return code: %d, errno: %d, fw error: %d",
+		    cmd_id, ret, errno, cmd.error);
+}
+
+static struct kvm_vm *sev_vm_create(bool es)
+{
+	struct kvm_vm *vm;
+	struct kvm_sev_launch_start start = { 0 };
+	int i;
+
+	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+	sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
+	for (i = 0; i < MIGRATE_TEST_NUM_VCPUS; ++i)
+		vm_vcpu_add(vm, i);
+	start.policy |= (es) << 2;
+	sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
+	if (es)
+		sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+	return vm;
+}
+
+static void test_sev_migrate_from(bool es)
+{
+	struct kvm_vm *vms[MIGRATE_TEST_VMS];
+	struct kvm_enable_cap cap = {
+		.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM
+	};
+	int i;
+
+	for (i = 0; i < MIGRATE_TEST_VMS; ++i) {
+		vms[i] = sev_vm_create(es);
+		if (i > 0) {
+			cap.args[0] = vms[i - 1]->fd;
+			vm_enable_cap(vms[i], &cap);
+		}
+	}
+}
+
+struct locking_thread_input {
+	struct kvm_vm *vm;
+	int source_fds[LOCK_TESTING_THREADS];
+};
+
+static void *locking_test_thread(void *arg)
+{
+	/*
+	 * This test case runs a number of threads all trying to use the intra
+	 * host migration ioctls. This tries to detect if a deadlock exists.
+	 */
+	struct kvm_enable_cap cap = {
+		.cap = KVM_CAP_VM_MIGRATE_ENC_CONTEXT_FROM
+	};
+	int i, j;
+	struct locking_thread_input *input = (struct locking_test_thread *)arg;
+
+	for (i = 0; i < LOCK_TESTING_ITERATIONS; ++i) {
+		j = input->source_fds[i % LOCK_TESTING_THREADS];
+		cap.args[0] = input->source_fds[j];
+		/*
+		 * Call IOCTL directly without checking return code or
+		 * asserting. We are * simply trying to confirm there is no
+		 * deadlock from userspace * not check correctness of
+		 * migration here.
+		 */
+		ioctl(input->vm->fd, KVM_ENABLE_CAP, &cap);
+	}
+}
+
+static void test_sev_migrate_locking(void)
+{
+	struct locking_thread_input input[LOCK_TESTING_THREADS];
+	pthread_t pt[LOCK_TESTING_THREADS];
+	int i;
+
+	for (i = 0; i < LOCK_TESTING_THREADS; ++i) {
+		input[i].vm = sev_vm_create(/* es= */ false);
+		input[0].source_fds[i] = input[i].vm->fd;
+	}
+	for (i = 1; i < LOCK_TESTING_THREADS; ++i)
+		memcpy(input[i].source_fds, input[0].source_fds,
+		       sizeof(input[i].source_fds));
+
+	for (i = 0; i < LOCK_TESTING_THREADS; ++i)
+		pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
+
+	for (i = 0; i < LOCK_TESTING_THREADS; ++i)
+		pthread_join(pt[i], NULL);
+}
+
+int main(int argc, char *argv[])
+{
+	test_sev_migrate_from(/* es= */ false);
+	test_sev_migrate_from(/* es= */ true);
+	test_sev_migrate_locking();
+	return 0;
+}
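Folding in the review feedback on the thread function above (the corrected index computation, the fixed cast, and Sean's suggested __sev_migrate_from() helper), the locking thread would look roughly like the sketch below. This is an illustration of the expected follow-up, not code from this version of the patch.

static void *locking_test_thread(void *arg)
{
	struct locking_thread_input *input = (struct locking_thread_input *)arg;
	int i, j;

	for (i = 0; i < LOCK_TESTING_ITERATIONS; ++i) {
		/* Index source_fds directly; do not use an fd value as an index. */
		j = i % LOCK_TESTING_THREADS;
		/*
		 * The return value is intentionally ignored; the goal is only
		 * to shake out deadlocks, not to verify migration results.
		 */
		__sev_migrate_from(input->vm->fd, input->source_fds[j]);
	}

	return NULL;
}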
Adds testcases for intra host migration for SEV and SEV-ES. Also adds a
locking test to confirm no deadlock exists.

Signed-off-by: Peter Gonda <pgonda@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Cc: Marc Orr <marcorr@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
---
 tools/testing/selftests/kvm/Makefile      |   1 +
 .../selftests/kvm/x86_64/sev_vm_tests.c    | 159 ++++++++++++++++++
 2 files changed, 160 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86_64/sev_vm_tests.c
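A final note on the parameter-validation tests Peter agreed to add: from userspace a failing ioctl() returns -1 and reports the error code via errno, so the TEST_ASSERT(ret == -EINVAL, ...) checks sketched in the review would likely need to test errno instead. One possible shape, built on the hypothetical __sev_migrate_from() helper from the review (helper name and exact message are assumptions):

static void sev_migrate_from_expect_einval(struct kvm_vm *dst, struct kvm_vm *src)
{
	int ret = __sev_migrate_from(dst->fd, src->fd);

	/* ioctl() signals failure with -1 and places the error code in errno. */
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Migration should fail, ret: %d, errno: %d", ret, errno);
}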