@@ -174,6 +174,7 @@ TEST_GEN_PROGS_aarch64 += coalesced_io_test
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
+TEST_GEN_PROGS_aarch64 += guest_memfd_test
TEST_GEN_PROGS_aarch64 += guest_print_test
TEST_GEN_PROGS_aarch64 += get-reg-list
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
@@ -34,12 +34,48 @@ static void test_file_read_write(int fd)
"pwrite on a guest_mem fd should fail");
}
-static void test_mmap(int fd, size_t page_size)
+static void test_mmap_allowed(int fd, size_t total_size)
{
+ size_t page_size = getpagesize();
+ const char val = 0xaa;
+ char *mem;
+ int ret;
+ int i;
+
+ mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ TEST_ASSERT(mem != MAP_FAILED, "mmap() of guest memory should succeed.");
+
+ memset(mem, val, total_size);
+ for (i = 0; i < total_size; i++)
+ TEST_ASSERT_EQ(mem[i], val);
+
+ ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0,
+ page_size);
+ TEST_ASSERT(!ret, "fallocate() on the first page should succeed");
+
+ for (i = 0; i < page_size; i++)
+ TEST_ASSERT_EQ(mem[i], 0x00);
+ for (; i < total_size; i++)
+ TEST_ASSERT_EQ(mem[i], val);
+
+ memset(mem, val, total_size);
+ for (i = 0; i < total_size; i++)
+ TEST_ASSERT_EQ(mem[i], val);
+
+ ret = munmap(mem, total_size);
+ TEST_ASSERT(!ret, "munmap should succeed");
+}
+
+static void test_mmap_denied(int fd, size_t total_size)
+{
+ size_t page_size = getpagesize();
char *mem;
mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
TEST_ASSERT_EQ(mem, MAP_FAILED);
+
+ mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ TEST_ASSERT_EQ(mem, MAP_FAILED);
}
static void test_file_size(int fd, size_t page_size, size_t total_size)
@@ -170,19 +206,30 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
close(fd1);
}
-int main(int argc, char *argv[])
+static unsigned long get_shared_type(void)
{
- size_t page_size;
+#ifdef __x86_64__
+ return KVM_X86_SW_PROTECTED_VM;
+#endif
+#ifdef __aarch64__
+ return KVM_VM_TYPE_ARM_SW_PROTECTED;
+#endif
+ return 0;
+}
+
+static void test_vm_type(unsigned long type, bool is_shared)
+{
+ struct kvm_vm *vm;
size_t total_size;
+ size_t page_size;
int fd;
- struct kvm_vm *vm;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
page_size = getpagesize();
total_size = page_size * 4;
- vm = vm_create_barebones();
+ vm = vm_create_barebones_type(type);
test_create_guest_memfd_invalid(vm);
test_create_guest_memfd_multiple(vm);
@@ -190,10 +237,26 @@ int main(int argc, char *argv[])
fd = vm_create_guest_memfd(vm, total_size, 0);
test_file_read_write(fd);
- test_mmap(fd, page_size);
+
+ if (is_shared)
+ test_mmap_allowed(fd, total_size);
+ else
+ test_mmap_denied(fd, total_size);
+
test_file_size(fd, page_size, total_size);
test_fallocate(fd, page_size, total_size);
test_invalid_punch_hole(fd, page_size, total_size);
close(fd);
+ kvm_vm_release(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ test_vm_type(VM_TYPE_DEFAULT, false);
+
+ if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM))
+ test_vm_type(get_shared_type(), true);
+
+ return 0;
}
@@ -347,9 +347,8 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
}
#ifdef __aarch64__
- TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types");
if (vm->pa_bits != 40)
- vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
+ vm->type |= KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif
vm_open(vm);
Expand the guest_memfd selftests to include testing mapping guest
memory for VM types that support it. Also, build the guest_memfd
selftest for aarch64.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 tools/testing/selftests/kvm/Makefile           |  1 +
 .../testing/selftests/kvm/guest_memfd_test.c   | 75 +++++++++++++++++--
 tools/testing/selftests/kvm/lib/kvm_util.c     |  3 +-
 3 files changed, 71 insertions(+), 8 deletions(-)
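
For reference, a rough sketch of how the new coverage could be exercised on
an aarch64 host, assuming the usual in-tree selftests build flow:

  # Build the KVM selftests; guest_memfd_test is now generated for arm64 too.
  $ make -C tools/testing/selftests/kvm

  # Run the test: it covers the default VM type and, when the kernel reports
  # KVM_CAP_GMEM_SHARED_MEM, a software-protected VM type for which mmap()
  # of guest_memfd is allowed.
  $ ./tools/testing/selftests/kvm/guest_memfd_test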