[9/9] KVM: selftests: Add option to run dirty_log_perf_test vCPUs in L2

Message ID 20220429183935.1094599-10-dmatlack@google.com
State New, archived
Series KVM: selftests: Add nested support to dirty_log_perf_test

Commit Message

David Matlack April 29, 2022, 6:39 p.m. UTC
Add an option to dirty_log_perf_test that configures the vCPUs to run in
L2 instead of L1. This makes it possible to benchmark the dirty logging
performance of nested virtualization, which is particularly interesting
because KVM must shadow L1's EPT/NPT tables.

For now this support only works on x86_64 CPUs with VMX. Otherwise
passing -n results in the test being skipped.
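
For example, to run 4 vCPUs in L2, each dirtying 1GiB of memory (an
illustrative invocation; -v and -b are existing dirty_log_perf_test
options):

  ./dirty_log_perf_test -n -v 4 -b 1G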

Signed-off-by: David Matlack <dmatlack@google.com>
---
 tools/testing/selftests/kvm/Makefile          |  1 +
 .../selftests/kvm/dirty_log_perf_test.c       | 10 ++-
 .../selftests/kvm/include/perf_test_util.h    |  5 ++
 .../selftests/kvm/include/x86_64/vmx.h        |  3 +
 .../selftests/kvm/lib/perf_test_util.c        | 13 ++-
 .../selftests/kvm/lib/x86_64/perf_test_util.c | 89 +++++++++++++++++++
 tools/testing/selftests/kvm/lib/x86_64/vmx.c  | 11 +++
 7 files changed, 127 insertions(+), 5 deletions(-)
 create mode 100644 tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c

Comments

Peter Xu May 16, 2022, 10:17 p.m. UTC | #1
On Fri, Apr 29, 2022 at 06:39:35PM +0000, David Matlack wrote:
> +static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
> +{
> +#define L2_GUEST_STACK_SIZE 64
> +	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
> +	unsigned long *rsp;
> +
> +	GUEST_ASSERT(vmx->vmcs_gpa);
> +	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
> +	GUEST_ASSERT(load_vmcs(vmx));
> +	GUEST_ASSERT(ept_1g_pages_supported());
> +
> +	rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
> +	*rsp = vcpu_id;
> +	prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);

Just to ask: does this set the same stack pointer for all the
vCPUs?

> +
> +	GUEST_ASSERT(!vmlaunch());
> +	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
> +	GUEST_DONE();
> +}

[...]

> +/* Identity map the entire guest physical address space with 1GiB Pages. */
> +void nested_map_all_1g(struct vmx_pages *vmx, struct kvm_vm *vm)
> +{
> +	__nested_map(vmx, vm, 0, 0, vm->max_gfn << vm->page_shift, PG_LEVEL_1G);
> +}

Could max_gfn be large?  Could this consume a bunch of pages even
when mapping with 1G pages only?

Thanks,
David Matlack May 16, 2022, 10:34 p.m. UTC | #2
On Mon, May 16, 2022 at 3:17 PM Peter Xu <peterx@redhat.com> wrote:
>
> On Fri, Apr 29, 2022 at 06:39:35PM +0000, David Matlack wrote:
> > +static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
> > +{
> > +#define L2_GUEST_STACK_SIZE 64
> > +     unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
> > +     unsigned long *rsp;
> > +
> > +     GUEST_ASSERT(vmx->vmcs_gpa);
> > +     GUEST_ASSERT(prepare_for_vmx_operation(vmx));
> > +     GUEST_ASSERT(load_vmcs(vmx));
> > +     GUEST_ASSERT(ept_1g_pages_supported());
> > +
> > +     rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
> > +     *rsp = vcpu_id;
> > +     prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);
>
> Just to ask: does this set the same stack pointer for all the
> vCPUs?

No, but I understand the confusion since typically selftests use
symbols like "l2_guest_code" that are global. But "l2_guest_stack" is
actually a local variable so it will be allocated on the stack. Each
vCPU runs on a separate stack, so they will each run with their own
"l2_guest_stack".

>
> > +
> > +     GUEST_ASSERT(!vmlaunch());
> > +     GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
> > +     GUEST_DONE();
> > +}
>
> [...]
>
> > +/* Identity map the entire guest physical address space with 1GiB Pages. */
> > +void nested_map_all_1g(struct vmx_pages *vmx, struct kvm_vm *vm)
> > +{
> > +     __nested_map(vmx, vm, 0, 0, vm->max_gfn << vm->page_shift, PG_LEVEL_1G);
> > +}
>
> Could max_gfn be large?  Could this consume a bunch of pages even
> when mapping with 1G pages only?

Since the selftests only support 4-level EPT, this will use at most
513 pages. If we add support for 5-level EPT we may need to revisit
this approach.
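
A quick sketch of where 513 comes from (illustrative C, not selftests
code; 1GiB mappings terminate at the PDPTE level and each PDPT covers
512GiB):

  #include <stdint.h>

  /* Pages needed to identity-map max_gpa bytes with 1GiB EPT entries. */
  static uint64_t ept_1g_table_pages(uint64_t max_gpa)
  {
  	uint64_t pdpt_span = 512ULL << 30;	/* each PDPT maps 512GiB */
  	uint64_t pdpts = (max_gpa + pdpt_span - 1) / pdpt_span;

  	return 1 + pdpts;	/* one PML4 page plus the PDPTs */
  }

With 4-level EPT the guest physical address space is at most 2^48
bytes, so pdpts is at most 512 and the total is 1 + 512 = 513.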

>
> Thanks,
>
> --
> Peter Xu
>
Peter Xu May 16, 2022, 11:42 p.m. UTC | #3
On Mon, May 16, 2022 at 03:34:28PM -0700, David Matlack wrote:
> On Mon, May 16, 2022 at 3:17 PM Peter Xu <peterx@redhat.com> wrote:
> >
> > On Fri, Apr 29, 2022 at 06:39:35PM +0000, David Matlack wrote:
> > > +static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
> > > +{
> > > +#define L2_GUEST_STACK_SIZE 64
> > > +     unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
> > > +     unsigned long *rsp;
> > > +
> > > +     GUEST_ASSERT(vmx->vmcs_gpa);
> > > +     GUEST_ASSERT(prepare_for_vmx_operation(vmx));
> > > +     GUEST_ASSERT(load_vmcs(vmx));
> > > +     GUEST_ASSERT(ept_1g_pages_supported());
> > > +
> > > +     rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
> > > +     *rsp = vcpu_id;
> > > +     prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);
> >
> > Just to ask: does this set the same stack pointer for all the
> > vCPUs?
> 
> No, but I understand the confusion since typically selftests use
> symbols like "l2_guest_code" that are global. But "l2_guest_stack" is
> actually a local variable so it will be allocated on the stack. Each
> vCPU runs on a separate stack, so they will each run with their own
> "l2_guest_stack".

Ahh that's correct!

> 
> >
> > > +
> > > +     GUEST_ASSERT(!vmlaunch());
> > > +     GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
> > > +     GUEST_DONE();
> > > +}
> >
> > [...]
> >
> > > +/* Identity map the entire guest physical address space with 1GiB Pages. */
> > > +void nested_map_all_1g(struct vmx_pages *vmx, struct kvm_vm *vm)
> > > +{
> > > +     __nested_map(vmx, vm, 0, 0, vm->max_gfn << vm->page_shift, PG_LEVEL_1G);
> > > +}
> >
> > Could max_gfn be large?  Could this consume a bunch of pages even
> > when mapping with 1G pages only?
> 
> Since the selftests only support 4-level EPT, this will use at most
> 513 pages. If we add support for 5-level EPT we may need to revisit
> this approach.

It's just that AFAICT vm_alloc_page_table() allocates from slot 0 for all
kinds of pgtables, including EPT.  Since we're not aware of this extra
consumption when creating the VM, I wonder whether it could fail under
some conditions, so maybe we should reserve the pages explicitly to be
sure we'll have them?
David Matlack May 16, 2022, 11:47 p.m. UTC | #4
On Mon, May 16, 2022 at 4:42 PM Peter Xu <peterx@redhat.com> wrote:
>
> On Mon, May 16, 2022 at 03:34:28PM -0700, David Matlack wrote:
> > On Mon, May 16, 2022 at 3:17 PM Peter Xu <peterx@redhat.com> wrote:
> > >
> > > On Fri, Apr 29, 2022 at 06:39:35PM +0000, David Matlack wrote:
> > > > +static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
> > > > +{
> > > > +#define L2_GUEST_STACK_SIZE 64
> > > > +     unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
> > > > +     unsigned long *rsp;
> > > > +
> > > > +     GUEST_ASSERT(vmx->vmcs_gpa);
> > > > +     GUEST_ASSERT(prepare_for_vmx_operation(vmx));
> > > > +     GUEST_ASSERT(load_vmcs(vmx));
> > > > +     GUEST_ASSERT(ept_1g_pages_supported());
> > > > +
> > > > +     rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
> > > > +     *rsp = vcpu_id;
> > > > +     prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);
> > >
> > > Just to ask: does this set the same stack pointer for all the
> > > vCPUs?
> >
> > No, but I understand the confusion since typically selftests use
> > symbols like "l2_guest_code" that are global. But "l2_guest_stack" is
> > actually a local variable so it will be allocated on the stack. Each
> > vCPU runs on a separate stack, so they will each run with their own
> > "l2_guest_stack".
>
> Ahh that's correct!
>
> >
> > >
> > > > +
> > > > +     GUEST_ASSERT(!vmlaunch());
> > > > +     GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
> > > > +     GUEST_DONE();
> > > > +}
> > >
> > > [...]
> > >
> > > > +/* Identity map the entire guest physical address space with 1GiB Pages. */
> > > > +void nested_map_all_1g(struct vmx_pages *vmx, struct kvm_vm *vm)
> > > > +{
> > > > +     __nested_map(vmx, vm, 0, 0, vm->max_gfn << vm->page_shift, PG_LEVEL_1G);
> > > > +}
> > >
> > > Could max_gfn be large?  Could this consume a bunch of pages even
> > > when mapping with 1G pages only?
> >
> > Since the selftests only support 4-level EPT, this will use at most
> > 513 pages. If we add support for 5-level EPT we may need to revisit
> > this approach.
>
> It's just that AFAICT vm_alloc_page_table() allocates from slot 0 for all
> kinds of pgtables, including EPT.  Since we're not aware of this extra
> consumption when creating the VM, I wonder whether it could fail under
> some conditions, so maybe we should reserve the pages explicitly to be
> sure we'll have them?

So far in my tests perf_test_util seemed to allocate enough pages in
slot 0 that this just worked, so I didn't bother to explicitly reserve
the extra pages. But that's just an accident waiting to happen as you
point out, so I'll fix that in v2.
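
Roughly what I have in mind (a sketch; the helper name is illustrative
and the actual v2 may differ):

  /* Extra slot-0 pages to reserve when running vCPUs in L2 on x86_64. */
  uint64_t perf_test_nested_pages(int nr_vcpus)
  {
  	/*
  	 * 513 pages for the shared 1GiB identity-mapped EPT, plus a
  	 * few pages per vCPU for VMX data structures (vmxon, vmcs,
  	 * msr/io bitmaps, etc.).
  	 */
  	return 513 + 10 * nr_vcpus;
  }

and then size slot 0 with that included when creating the VM.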

>
> --
> Peter Xu
>
Patch

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 1ba0d01362bd..9b342239a6dd 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -49,6 +49,7 @@  LIBKVM += lib/test_util.c
 
 LIBKVM_x86_64 += lib/x86_64/apic.c
 LIBKVM_x86_64 += lib/x86_64/handlers.S
+LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
 LIBKVM_x86_64 += lib/x86_64/processor.c
 LIBKVM_x86_64 += lib/x86_64/svm.c
 LIBKVM_x86_64 += lib/x86_64/ucall.c
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 7b47ae4f952e..d60a34cdfaee 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -336,8 +336,8 @@  static void run_test(enum vm_guest_mode mode, void *arg)
 static void help(char *name)
 {
 	puts("");
-	printf("usage: %s [-h] [-i iterations] [-p offset] [-g]"
-	       "[-m mode] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
+	printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
+	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
 	       "[-x memslots]\n", name);
 	puts("");
 	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
@@ -351,6 +351,7 @@  static void help(char *name)
 	printf(" -p: specify guest physical test memory offset\n"
 	       "     Warning: a low offset can conflict with the loaded test code.\n");
 	guest_modes_help();
+	printf(" -n: Run the vCPUs in nested mode (L2)\n");
 	printf(" -b: specify the size of the memory region which should be\n"
 	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
 	       "     (default: 1G)\n");
@@ -387,7 +388,7 @@  int main(int argc, char *argv[])
 
 	guest_modes_append_default();
 
-	while ((opt = getopt(argc, argv, "ghi:p:m:b:f:v:os:x:")) != -1) {
+	while ((opt = getopt(argc, argv, "ghi:p:m:nb:f:v:os:x:")) != -1) {
 		switch (opt) {
 		case 'g':
 			dirty_log_manual_caps = 0;
@@ -401,6 +402,9 @@  int main(int argc, char *argv[])
 		case 'm':
 			guest_modes_cmdline(optarg);
 			break;
+		case 'n':
+			perf_test_args.nested = true;
+			break;
 		case 'b':
 			guest_percpu_mem_size = parse_size(optarg);
 			break;
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
index a86f953d8d36..1dfdaec43321 100644
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -34,6 +34,9 @@  struct perf_test_args {
 	uint64_t guest_page_size;
 	int wr_fract;
 
+	/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
+	bool nested;
+
 	struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
 };
 
@@ -49,5 +52,7 @@  void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
 
 void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
 void perf_test_join_vcpu_threads(int vcpus);
+void perf_test_guest_code(uint32_t vcpu_id);
+void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus);
 
 #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 3b1794baa97c..17d712503a36 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -96,6 +96,7 @@ 
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK	0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA			0x00000020
 
+#define VMX_EPT_VPID_CAP_1G_PAGES		0x00020000
 #define VMX_EPT_VPID_CAP_AD_BITS		0x00200000
 
 #define EXIT_REASON_FAILED_VMENTRY	0x80000000
@@ -608,6 +609,7 @@  bool load_vmcs(struct vmx_pages *vmx);
 
 bool nested_vmx_supported(void);
 void nested_vmx_check_supported(void);
+bool ept_1g_pages_supported(void);
 
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 		   uint64_t nested_paddr, uint64_t paddr);
@@ -615,6 +617,7 @@  void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 		 uint64_t nested_paddr, uint64_t paddr, uint64_t size);
 void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
 			uint32_t memslot);
+void nested_map_all_1g(struct vmx_pages *vmx, struct kvm_vm *vm);
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
 		  uint32_t eptp_memslot);
 void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 722df3a28791..6e15c93a3577 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -40,7 +40,7 @@  static bool all_vcpu_threads_running;
  * Continuously write to the first 8 bytes of each page in the
  * specified region.
  */
-static void guest_code(uint32_t vcpu_id)
+void perf_test_guest_code(uint32_t vcpu_id)
 {
 	struct perf_test_args *pta = &perf_test_args;
 	struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id];
@@ -140,7 +140,7 @@  struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 	 * effect as KVM allows aliasing HVAs in meslots.
 	 */
 	vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
-				  guest_num_pages, 0, guest_code, NULL);
+				  guest_num_pages, 0, perf_test_guest_code, NULL);
 
 	pta->vm = vm;
 
@@ -178,6 +178,9 @@  struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 
 	perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access);
 
+	if (pta->nested)
+		perf_test_setup_nested(vm, vcpus);
+
 	ucall_init(vm, NULL);
 
 	/* Export the shared variables to the guest. */
@@ -198,6 +201,12 @@  void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
 	sync_global_to_guest(vm, perf_test_args);
 }
 
+void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus)
+{
+	pr_info("%s() not supported on this architecture, skipping.\n", __func__);
+	exit(KSFT_SKIP);
+}
+
 static void *vcpu_thread_main(void *data)
 {
 	struct vcpu_thread *vcpu = data;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
new file mode 100644
index 000000000000..ba20a1499263
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
@@ -0,0 +1,89 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * x86_64-specific extensions to perf_test_util.c.
+ *
+ * Copyright (C) 2022, Google, Inc.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "perf_test_util.h"
+#include "../kvm_util_internal.h"
+#include "processor.h"
+#include "vmx.h"
+
+void perf_test_l2_guest_code(uint64_t vcpu_id)
+{
+	perf_test_guest_code(vcpu_id);
+	vmcall();
+}
+
+extern char perf_test_l2_guest_entry[];
+__asm__(
+"perf_test_l2_guest_entry:"
+"	mov (%rsp), %rdi;"
+"	call perf_test_l2_guest_code;"
+"	ud2;"
+);
+
+static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
+{
+#define L2_GUEST_STACK_SIZE 64
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	unsigned long *rsp;
+
+	GUEST_ASSERT(vmx->vmcs_gpa);
+	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
+	GUEST_ASSERT(load_vmcs(vmx));
+	GUEST_ASSERT(ept_1g_pages_supported());
+
+	rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
+	*rsp = vcpu_id;
+	prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);
+
+	GUEST_ASSERT(!vmlaunch());
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+	GUEST_DONE();
+}
+
+void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus)
+{
+	struct vmx_pages *vmx, *vmx0 = NULL;
+	struct kvm_regs regs;
+	vm_vaddr_t vmx_gva;
+	int vcpu_id;
+
+	nested_vmx_check_supported();
+
+	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+		vmx = vcpu_alloc_vmx(vm, &vmx_gva);
+
+		if (vcpu_id == 0) {
+			prepare_eptp(vmx, vm, 0);
+			/*
+			 * Identity map L2 with 1G pages so that KVM can shadow
+			 * the EPT12 with huge pages.
+			 */
+			nested_map_all_1g(vmx, vm);
+			vmx0 = vmx;
+		} else {
+			/* Share the same EPT table across all vCPUs. */
+			vmx->eptp = vmx0->eptp;
+			vmx->eptp_hva = vmx0->eptp_hva;
+			vmx->eptp_gpa = vmx0->eptp_gpa;
+		}
+
+		/*
+		 * Override the vCPU to run perf_test_l1_guest_code() which will
+		 * bounce it into L2 before calling perf_test_guest_code().
+		 */
+		vcpu_regs_get(vm, vcpu_id, &regs);
+		regs.rip = (unsigned long) perf_test_l1_guest_code;
+		vcpu_regs_set(vm, vcpu_id, &regs);
+		vcpu_args_set(vm, vcpu_id, 2, vmx_gva, vcpu_id);
+	}
+}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 3862d93a18ac..32374a0f002c 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -203,6 +203,11 @@  static bool ept_vpid_cap_supported(uint64_t mask)
 	return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
 }
 
+bool ept_1g_pages_supported(void)
+{
+	return ept_vpid_cap_supported(VMX_EPT_VPID_CAP_1G_PAGES);
+}
+
 /*
  * Initialize the control fields to the most basic settings possible.
  */
@@ -546,6 +551,12 @@  void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
 	}
 }
 
+/* Identity map the entire guest physical address space with 1GiB Pages. */
+void nested_map_all_1g(struct vmx_pages *vmx, struct kvm_vm *vm)
+{
+	__nested_map(vmx, vm, 0, 0, vm->max_gfn << vm->page_shift, PG_LEVEL_1G);
+}
+
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
 		  uint32_t eptp_memslot)
 {