
[RFC,V2,3/8] selftests: kvm: priv_memfd_test: Add support for memory conversion

Message ID 20220511000811.384766-4-vannapurve@google.com (mailing list archive)
State New
Series selftests: KVM: selftests for fd-based approach of supporting private memory

Commit Message

Vishal Annapurve May 11, 2022, 12:08 a.m. UTC
Add handling of explicit private/shared memory conversion using
KVM_HC_MAP_GPA_RANGE and implicit memory conversion by handling
KVM_EXIT_MEMORY_ERROR.

Signed-off-by: Vishal Annapurve <vannapurve@google.com>
---
 tools/testing/selftests/kvm/priv_memfd_test.c | 87 +++++++++++++++++++
 1 file changed, 87 insertions(+)
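
For context: in the fd-based private memory ("private memfd") model this
series exercises, backing allocated in the memfd is treated as private and
a punched hole as shared, which is why both handlers added here reduce to
an fallocate() call on the memfd. The explicit conversion path is driven
from the guest via the KVM_HC_MAP_GPA_RANGE hypercall; a minimal guest-side
sketch (assuming a kvm_hypercall() helper along the lines of the x86
selftest library's, which is not part of this patch) might look like:

	/*
	 * Guest-side sketch: ask the host to treat the range starting at
	 * gpa (npages pages) as private/encrypted. The host-side handler
	 * added below services the resulting KVM_EXIT_HYPERCALL.
	 */
	uint64_t ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, npages,
				     KVM_MAP_GPA_RANGE_ENCRYPTED, 0);
	GUEST_ASSERT(ret == 0);

The implicit path needs no hypercall: a guest access to memory whose
shared/private state disagrees with the backing surfaces as
KVM_EXIT_MEMORY_ERROR, which the second handler services the same way.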

Comments

Shuah Khan May 12, 2022, 5:40 p.m. UTC | #1
On 5/10/22 6:08 PM, Vishal Annapurve wrote:
> Add handling of explicit private/shared memory conversion using
> KVM_HC_MAP_GPA_RANGE and implicit memory conversion by handling
> KVM_EXIT_MEMORY_ERROR.
> 
> Signed-off-by: Vishal Annapurve <vannapurve@google.com>
> ---
>   tools/testing/selftests/kvm/priv_memfd_test.c | 87 +++++++++++++++++++
>   1 file changed, 87 insertions(+)
> 
> diff --git a/tools/testing/selftests/kvm/priv_memfd_test.c b/tools/testing/selftests/kvm/priv_memfd_test.c
> index bbb58c62e186..55e24c893b07 100644
> --- a/tools/testing/selftests/kvm/priv_memfd_test.c
> +++ b/tools/testing/selftests/kvm/priv_memfd_test.c
> @@ -155,6 +155,83 @@ static struct test_run_helper priv_memfd_testsuite[] = {
>   	},
>   };
>   
> +static void handle_vm_exit_hypercall(struct kvm_run *run,
> +	uint32_t test_id)
> +{
> +	uint64_t gpa, npages, attrs;
> +	int priv_memfd =
> +		priv_memfd_testsuite[test_id].priv_memfd;

Do you need this on a separate line? It doesn't look like it will exceed
the line-length limit, even with the tab.

> +	int ret;
> +	int fallocate_mode;
> +
> +	if (run->hypercall.nr != KVM_HC_MAP_GPA_RANGE) {
> +		TEST_FAIL("Unhandled Hypercall %lld\n",
> +					run->hypercall.nr);

Is this considered a test failure, or a skip because of an unmet dependency?
Also, do you need run->hypercall.nr on a separate line?
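
If a skip is the intent, the usual kselftest pattern would be roughly
(sketch only, using the existing print_skip()/KSFT_SKIP conventions):

	if (run->hypercall.nr != KVM_HC_MAP_GPA_RANGE) {
		print_skip("Unhandled hypercall %lld", run->hypercall.nr);
		exit(KSFT_SKIP);
	}

TEST_FAIL() is the right call only if an unexpected hypercall can mean
nothing but a broken setup.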

> +	}
> +
> +	gpa = run->hypercall.args[0];
> +	npages = run->hypercall.args[1];
> +	attrs = run->hypercall.args[2];
> +
> +	if ((gpa < TEST_MEM_GPA) || ((gpa +
> +		(npages << MIN_PAGE_SHIFT)) > TEST_MEM_END)) {
> +		TEST_FAIL("Unhandled gpa 0x%lx npages %ld\n",
> +			gpa, npages);

Same question here about putting gpa, npages on a separate line. Also,
align it with the previous line for readability:

TEST_FAIL("Unhandled gpa 0x%lx npages %ld\n",
	  gpa, npages);
  
> +	}
> +
> +	if (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED)
> +		fallocate_mode = 0;
> +	else {
> +		fallocate_mode = (FALLOC_FL_PUNCH_HOLE |
> +			FALLOC_FL_KEEP_SIZE);
> +	}
> +	pr_info("Converting off 0x%lx pages 0x%lx to %s\n",
> +		(gpa - TEST_MEM_GPA), npages,
> +		fallocate_mode ?
> +			"shared" : "private");
> +	ret = fallocate(priv_memfd, fallocate_mode,
> +		(gpa - TEST_MEM_GPA),
> +		npages << MIN_PAGE_SHIFT);
> +	TEST_ASSERT(ret != -1,
> +		"fallocate failed in hc handling");
> +	run->hypercall.ret = 0;
> +}
> +
> +static void handle_vm_exit_memory_error(struct kvm_run *run,
> +	uint32_t test_id)
> +{
> +	uint64_t gpa, size, flags;
> +	int ret;
> +	int priv_memfd =
> +		priv_memfd_testsuite[test_id].priv_memfd;
> +	int fallocate_mode;
> +
> +	gpa = run->memory.gpa;
> +	size = run->memory.size;
> +	flags = run->memory.flags;
> +
> +	if ((gpa < TEST_MEM_GPA) || ((gpa + size)
> +					> TEST_MEM_END)) {
> +		TEST_FAIL("Unhandled gpa 0x%lx size 0x%lx\n",
> +			gpa, size);
> +	}
> +
> +	if (flags & KVM_MEMORY_EXIT_FLAG_PRIVATE)
> +		fallocate_mode = 0;
> +	else {
> +		fallocate_mode = (FALLOC_FL_PUNCH_HOLE |
> +				FALLOC_FL_KEEP_SIZE);
> +	}
> +	pr_info("Converting off 0x%lx size 0x%lx to %s\n",
> +		(gpa - TEST_MEM_GPA), size,
> +		fallocate_mode ?
> +			"shared" : "private");
> +	ret = fallocate(priv_memfd, fallocate_mode,
> +		(gpa - TEST_MEM_GPA), size);
> +	TEST_ASSERT(ret != -1,
> +		"fallocate failed in memory error handling");
> +}
> +
>   static void vcpu_work(struct kvm_vm *vm, uint32_t test_id)
>   {
>   	struct kvm_run *run;
> @@ -181,6 +258,16 @@ static void vcpu_work(struct kvm_vm *vm, uint32_t test_id)
>   			continue;
>   		}
>   
> +		if (run->exit_reason == KVM_EXIT_HYPERCALL) {
> +			handle_vm_exit_hypercall(run, test_id);
> +			continue;
> +		}
> +
> +		if (run->exit_reason == KVM_EXIT_MEMORY_ERROR) {
> +			handle_vm_exit_memory_error(run, test_id);
> +			continue;
> +		}
> +
>   		TEST_FAIL("Unhandled VCPU exit reason %d\n", run->exit_reason);
>   		break;
>   	}
> 

Looks like you can easily combine lines without running into the
character limit for several lines of code in this patch. If you haven't
already, run checkpatch to make sure the coding guidelines are honored.
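
For example, the declarations and the first check in
handle_vm_exit_hypercall() could collapse to something like (a sketch of
the suggested style, not tested):

	int ret, fallocate_mode;
	int priv_memfd = priv_memfd_testsuite[test_id].priv_memfd;

	if (run->hypercall.nr != KVM_HC_MAP_GPA_RANGE)
		TEST_FAIL("Unhandled Hypercall %lld\n", run->hypercall.nr);

which stays well under the limit and drops the now-unnecessary braces.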

thanks,
-- Shuah