
[V1,2/6] KVM: Selftests: Add support for private memory

Message ID 20221111014244.1714148-3-vannapurve@google.com
State New
Series selftests: KVM: selftests for fd-based private memory

Commit Message

Vishal Annapurve Nov. 11, 2022, 1:42 a.m. UTC
Add support for registering private memory with KVM using the
KVM_SET_USER_MEMORY_REGION ioctl.

A helper function to query the extended userspace memory region is
introduced to allow memory conversion.

The vm_mem_backing_src_type enum is extended with additional guest memory
source types to cover the cases where guest memory is backed by both
anonymous memory and a restricted memfd.

Signed-off-by: Vishal Annapurve <vannapurve@google.com>
---
 .../selftests/kvm/include/kvm_util_base.h     | 12 +++-
 .../testing/selftests/kvm/include/test_util.h |  4 ++
 tools/testing/selftests/kvm/lib/kvm_util.c    | 58 +++++++++++++++++--
 tools/testing/selftests/kvm/lib/test_util.c   | 12 ++++
 4 files changed, 79 insertions(+), 7 deletions(-)
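
For reference, a minimal usage sketch (not part of this patch): a test adds
a memslot with one of the new backing source types and then looks up the
extended region to reach the restricted memfd. The GPA, slot and page-count
values below are made up for illustration.

#include "kvm_util.h"
#include "test_util.h"

static void add_private_test_region(struct kvm_vm *vm)
{
	const uint64_t gpa = 0xc0000000;	/* illustrative guest physical address */
	const uint32_t slot = 10;		/* illustrative memslot number */
	const uint64_t npages = 512;
	struct kvm_userspace_memory_region_ext *region_ext;

	/*
	 * Backing source that pairs anonymous host memory with a restricted
	 * memfd; the library creates the memfd and sets KVM_MEM_PRIVATE.
	 */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_AND_RESTRICTED_MEMFD,
				    gpa, slot, npages, 0);

	/* Query the extended region, e.g. as a starting point for conversion. */
	region_ext = kvm_userspace_memory_region_ext_find(vm, gpa,
					gpa + npages * vm->page_size - 1);
	TEST_ASSERT(region_ext->restricted_fd >= 0,
		    "Expected a restricted memfd backing the private region");
}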

Patch

diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index e42a09cd24a0..5e30f5b461bf 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -30,7 +30,10 @@  typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
 typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
 
 struct userspace_mem_region {
-	struct kvm_userspace_memory_region region;
+	union {
+		struct kvm_userspace_memory_region region;
+		struct kvm_userspace_memory_region_ext region_ext;
+	};
 	struct sparsebit *unused_phy_pages;
 	int fd;
 	off_t offset;
@@ -194,7 +197,7 @@  static inline bool kvm_has_cap(long cap)
 
 #define kvm_do_ioctl(fd, cmd, arg)						\
 ({										\
-	static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd), "");	\
+	static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) >= _IOC_SIZE(cmd), "");	\
 	ioctl(fd, cmd, arg);							\
 })
 
@@ -382,6 +385,7 @@  void vm_userspace_mem_region_add(struct kvm_vm *vm,
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
+
 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
@@ -708,6 +712,10 @@  struct kvm_userspace_memory_region *
 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
 				 uint64_t end);
 
+struct kvm_userspace_memory_region_ext *
+kvm_userspace_memory_region_ext_find(struct kvm_vm *vm, uint64_t start,
+				 uint64_t end);
+
 #define sync_global_to_guest(vm, g) ({				\
 	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
 	memcpy(_p, &(g), sizeof(g));				\
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index befc754ce9b3..140a61f68fe5 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -96,6 +96,8 @@  enum vm_mem_backing_src_type {
 	VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB,
 	VM_MEM_SRC_SHMEM,
 	VM_MEM_SRC_SHARED_HUGETLB,
+	VM_MEM_SRC_ANONYMOUS_AND_RESTRICTED_MEMFD,
+	VM_MEM_SRC_ANON_HTLB2M_AND_RESTRICTED_MEMFD,
 	NUM_SRC_TYPES,
 };
 
@@ -103,7 +105,9 @@  enum vm_mem_backing_src_type {
 
 struct vm_mem_backing_src_alias {
 	const char *name;
+	/* Flags applicable for normal host accessible guest memory */
 	uint32_t flag;
+	uint32_t need_restricted_memfd;
 };
 
 #define MIN_RUN_DELAY_NS	200000UL
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index f1cb1627161f..5990250ec40b 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -31,6 +31,11 @@  int open_path_or_exit(const char *path, int flags)
 	return fd;
 }
 
+static int memfd_restricted(unsigned int flags)
+{
+	return syscall(__NR_memfd_restricted, flags);
+}
+
 /*
  * Open KVM_DEV_PATH if available, otherwise exit the entire program.
  *
@@ -519,6 +524,35 @@  __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
 
 }
 
+/*
+ * KVM Userspace Memory Region Ext Find
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   start - Starting VM physical address
+ *   end - Ending VM physical address, inclusive.
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Pointer to overlapping ext region, NULL if no such region.
+ *
+ * Public interface to userspace_mem_region_find. Allows tests to look up
+ * the memslot datastructure for a given range of guest physical memory.
+ */
+struct kvm_userspace_memory_region_ext *
+kvm_userspace_memory_region_ext_find(struct kvm_vm *vm, uint64_t start,
+				 uint64_t end)
+{
+	struct userspace_mem_region *region;
+
+	region = userspace_mem_region_find(vm, start, end);
+	if (!region)
+		return NULL;
+
+	return &region->region_ext;
+}
+
 /*
  * VM VCPU Remove
  *
@@ -818,6 +852,7 @@  void vm_userspace_mem_region_add(struct kvm_vm *vm,
 	struct userspace_mem_region *region;
 	size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
 	size_t alignment;
+	int restricted_memfd = -1;
 
 	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
 		"Number of guest pages is not compatible with the host. "
@@ -915,14 +950,24 @@  void vm_userspace_mem_region_add(struct kvm_vm *vm,
 
 	/* As needed perform madvise */
 	if ((src_type == VM_MEM_SRC_ANONYMOUS ||
-	     src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
+		src_type == VM_MEM_SRC_ANONYMOUS_THP ||
+		src_type == VM_MEM_SRC_ANONYMOUS_AND_RESTRICTED_MEMFD) &&
+		thp_configured()) {
 		ret = madvise(region->host_mem, npages * vm->page_size,
-			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
+			(src_type == VM_MEM_SRC_ANONYMOUS_THP) ?
+				MADV_HUGEPAGE : MADV_NOHUGEPAGE);
 		TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
 			    region->host_mem, npages * vm->page_size,
 			    vm_mem_backing_src_alias(src_type)->name);
 	}
 
+	if (vm_mem_backing_src_alias(src_type)->need_restricted_memfd) {
+		restricted_memfd = memfd_restricted(0);
+		TEST_ASSERT(restricted_memfd != -1,
+			"Failed to create restricted memfd");
+		flags |= KVM_MEM_PRIVATE;
+	}
+
 	region->unused_phy_pages = sparsebit_alloc();
 	sparsebit_set_num(region->unused_phy_pages,
 		guest_paddr >> vm->page_shift, npages);
@@ -931,13 +976,16 @@  void vm_userspace_mem_region_add(struct kvm_vm *vm,
 	region->region.guest_phys_addr = guest_paddr;
 	region->region.memory_size = npages * vm->page_size;
 	region->region.userspace_addr = (uintptr_t) region->host_mem;
-	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
+	region->region_ext.restricted_fd = restricted_memfd;
+	region->region_ext.restricted_offset = 0;
+	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region_ext);
 	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
 		"  rc: %i errno: %i\n"
 		"  slot: %u flags: 0x%x\n"
-		"  guest_phys_addr: 0x%lx size: 0x%lx",
+		"  guest_phys_addr: 0x%lx size: 0x%lx restricted fd: %d\n",
 		ret, errno, slot, flags,
-		guest_paddr, (uint64_t) region->region.memory_size);
+		guest_paddr, (uint64_t) region->region.memory_size,
+		restricted_memfd);
 
 	/* Add to quick lookup data structures */
 	vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 6d23878bbfe1..ebbac8246016 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -254,6 +254,16 @@  const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
 			 */
 			.flag = MAP_SHARED,
 		},
+		[VM_MEM_SRC_ANONYMOUS_AND_RESTRICTED_MEMFD] = {
+			.name = "anonymous_and_restricted_memfd",
+			.flag = ANON_FLAGS,
+			.need_restricted_memfd = 1,
+		},
+		[VM_MEM_SRC_ANON_HTLB2M_AND_RESTRICTED_MEMFD] = {
+			.name = "anonymous_hugetlb_2mb_and_restricted_memfd",
+			.flag = ANON_HUGE_FLAGS | MAP_HUGE_2MB,
+			.need_restricted_memfd = 1,
+		},
 	};
 	_Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES,
 		       "Missing new backing src types?");
@@ -272,11 +282,13 @@  size_t get_backing_src_pagesz(uint32_t i)
 	switch (i) {
 	case VM_MEM_SRC_ANONYMOUS:
 	case VM_MEM_SRC_SHMEM:
+	case VM_MEM_SRC_ANONYMOUS_AND_RESTRICTED_MEMFD:
 		return getpagesize();
 	case VM_MEM_SRC_ANONYMOUS_THP:
 		return get_trans_hugepagesz();
 	case VM_MEM_SRC_ANONYMOUS_HUGETLB:
 	case VM_MEM_SRC_SHARED_HUGETLB:
+	case VM_MEM_SRC_ANON_HTLB2M_AND_RESTRICTED_MEMFD:
 		return get_def_hugetlb_pagesz();
 	default:
 		return MAP_HUGE_PAGE_SIZE(flag);
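
To show why the extended-region lookup is exported, here is a hedged sketch
of the kind of conversion helper a test could layer on top of it. The helper
name, the KVM_MEM_PRIVATE flag check and the use of fallocate() hole punching
on the restricted memfd are illustrative assumptions, not something this
patch adds.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

#include "kvm_util.h"
#include "test_util.h"

/*
 * Hypothetical conversion helper: release the private backing for a GPA
 * range by punching a hole in the restricted memfd (assumes the
 * restricted-memfd implementation frees private pages on hole punch).
 */
static void guest_mem_convert_to_shared(struct kvm_vm *vm, uint64_t gpa,
					uint64_t size)
{
	const struct kvm_userspace_memory_region *region;
	struct kvm_userspace_memory_region_ext *region_ext;
	uint64_t offset;
	int ret;

	region = kvm_userspace_memory_region_find(vm, gpa, gpa + size - 1);
	region_ext = kvm_userspace_memory_region_ext_find(vm, gpa,
							  gpa + size - 1);
	TEST_ASSERT(region && (region->flags & KVM_MEM_PRIVATE),
		    "GPA range 0x%lx + 0x%lx is not backed by a private slot",
		    gpa, size);

	/* Offset of the GPA range within the restricted memfd. */
	offset = gpa - region->guest_phys_addr + region_ext->restricted_offset;
	ret = fallocate(region_ext->restricted_fd,
			FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			offset, size);
	TEST_ASSERT(ret == 0, "fallocate(PUNCH_HOLE) on restricted memfd failed");
}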