
[V1,5/5] selftests: kvm: Execute vmcall/vmmcall as per cpu type

Message ID 20220903012849.938069-6-vannapurve@google.com (mailing list archive)
State New, archived
Series Execute hypercalls from guests according to cpu type

Commit Message

Vishal Annapurve Sept. 3, 2022, 1:28 a.m. UTC
Modify the existing tests to execute the vmcall/vmmcall instruction
according to the CPU type: vmcall on Intel CPUs and vmmcall on AMD CPUs.
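
At its core this is a CPU-vendor check in front of the instruction; a
minimal sketch of the dispatch is below (do_nested_vmcall() is only an
illustrative name; the real helper added to lib/x86_64/processor.c
additionally saves %rbp and declares the remaining GPRs as clobbered,
since L1 destroys the guest's GPRs on vmexit):

	static inline void do_nested_vmcall(void)
	{
		if (is_amd_cpu())	/* vendor string is "AuthenticAMD" */
			__asm__ __volatile__("vmmcall");
		else			/* Intel and other VMX-capable CPUs */
			__asm__ __volatile__("vmcall");
	}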

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vishal Annapurve <vannapurve@google.com>
---
 .../selftests/kvm/include/x86_64/processor.h  | 10 +++
 .../selftests/kvm/include/x86_64/vmx.h        |  9 ---
 .../selftests/kvm/lib/x86_64/perf_test_util.c |  2 +-
 .../selftests/kvm/lib/x86_64/processor.c      | 15 +++++
 tools/testing/selftests/kvm/x86_64/smm_test.c |  2 +-
 .../testing/selftests/kvm/x86_64/state_test.c |  8 +--
 .../kvm/x86_64/vmx_apic_access_test.c         |  2 +-
 .../selftests/kvm/x86_64/vmx_dirty_log_test.c |  2 +-
 .../kvm/x86_64/vmx_nested_tsc_scaling_test.c  |  2 +-
 .../kvm/x86_64/vmx_preemption_timer_test.c    |  2 +-
 .../kvm/x86_64/vmx_tsc_adjust_test.c          |  2 +-
 .../selftests/kvm/x86_64/xen_shinfo_test.c    | 64 +++++++------------
 .../selftests/kvm/x86_64/xen_vmcall_test.c    | 14 ++--
 13 files changed, 70 insertions(+), 64 deletions(-)

Patch

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 0cbc71b7af50..74893a7a80f8 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -833,6 +833,16 @@  void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
 uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 		       uint64_t a3);
 
+/*
+ * Execute vmcall/vmmcall, as appropriate for the CPU, with no arguments.
+ */
+static inline void vmcall(void)
+{
+	kvm_hypercall(0, 0, 0, 0, 0);
+}
+
+void nested_guest_vmcall(void);
+
 void __vm_xsave_require_permission(int bit, const char *name);
 
 #define vm_xsave_require_permission(perm)	\
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 99fa1410964c..d8d4fd3353e5 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -480,15 +480,6 @@  static inline int vmresume(void)
 	return ret;
 }
 
-static inline void vmcall(void)
-{
-	/* Currently, L1 destroys our GPRs during vmexits.  */
-	__asm__ __volatile__("push %%rbp; vmcall; pop %%rbp" : : :
-			     "rax", "rbx", "rcx", "rdx",
-			     "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
-			     "r13", "r14", "r15");
-}
-
 static inline int vmread(uint64_t encoding, uint64_t *value)
 {
 	uint64_t tmp;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
index 0f344a7c89c4..b420b35b7f45 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
@@ -18,7 +18,7 @@ 
 void perf_test_l2_guest_code(uint64_t vcpu_id)
 {
 	perf_test_guest_code(vcpu_id);
-	vmcall();
+	nested_guest_vmcall();
 }
 
 extern char perf_test_l2_guest_entry[];
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index ac104653ab43..5227b2c48a50 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1195,6 +1195,21 @@  uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 	return r;
 }
 
+void nested_guest_vmcall(void)
+{
+	/* Currently, L1 destroys our GPRs during vmexits.  */
+	if (is_amd_cpu())
+		__asm__ __volatile__("push %%rbp; vmmcall; pop %%rbp" : : :
+			     "rax", "rbx", "rcx", "rdx",
+			     "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
+			     "r13", "r14", "r15");
+	else
+		__asm__ __volatile__("push %%rbp; vmcall; pop %%rbp" : : :
+			     "rax", "rbx", "rcx", "rdx",
+			     "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
+			     "r13", "r14", "r15");
+}
+
 const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
 {
 	static struct kvm_cpuid2 *cpuid;
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index 5f8ebe7795c1..26db9b18c1ca 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -61,7 +61,7 @@  static void l2_guest_code(void)
 
 	sync_with_host(10);
 
-	vmcall();
+	nested_guest_vmcall();
 }
 
 static void guest_code(void *arg)
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 3d42c18d4c5f..889f73e38af1 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -26,10 +26,10 @@  void svm_l2_guest_code(void)
 {
 	GUEST_SYNC(4);
 	/* Exit to L1 */
-	vmcall();
+	nested_guest_vmcall();
 	GUEST_SYNC(6);
 	/* Done, exit to L1 and never come back.  */
-	vmcall();
+	nested_guest_vmcall();
 }
 
 static void svm_l1_guest_code(struct svm_test_data *svm)
@@ -57,7 +57,7 @@  void vmx_l2_guest_code(void)
 	GUEST_SYNC(6);
 
 	/* Exit to L1 */
-	vmcall();
+	nested_guest_vmcall();
 
 	/* L1 has now set up a shadow VMCS for us.  */
 	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
@@ -70,7 +70,7 @@  void vmx_l2_guest_code(void)
 	GUEST_SYNC(12);
 
 	/* Done, exit to L1 and never come back.  */
-	vmcall();
+	nested_guest_vmcall();
 }
 
 static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
index 2d503dce0481..b6caeea02dc3 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
@@ -31,7 +31,7 @@ 
 static void l2_guest_code(void)
 {
 	/* Exit to L1 */
-	__asm__ __volatile__("vmcall");
+	vmcall();
 }
 
 static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index 5ab735bcf8ab..f8eeb35432f0 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -42,7 +42,7 @@  static void l2_guest_code(void)
 	GUEST_SYNC(false);
 
 	/* Exit to L1 and never come back.  */
-	vmcall();
+	nested_guest_vmcall();
 }
 
 void l1_guest_code(struct vmx_pages *vmx)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
index e3887e08666c..9ec1b5197193 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
@@ -76,7 +76,7 @@  static void l2_guest_code(void)
 	check_tsc_freq(UCHECK_L2);
 
 	/* exit to L1 */
-	__asm__ __volatile__("vmcall");
+	vmcall();
 }
 
 static void l1_guest_code(struct vmx_pages *vmx_pages)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
index 629b9c4bd6e4..1d5ff49be97d 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
@@ -38,7 +38,7 @@  void l2_guest_code(void)
 {
 	u64 vmx_pt_delta;
 
-	vmcall();
+	nested_guest_vmcall();
 	l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;
 
 	/*
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index da9628db7ee6..26cf2554a0a0 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -74,7 +74,7 @@  static void l2_guest_code(void)
 	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
 
 	/* Exit to L1 */
-	__asm__ __volatile__("vmcall");
+	vmcall();
 }
 
 static void l1_guest_code(struct vmx_pages *vmx_pages)
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index f003c8cc2812..4c32c977a98f 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -145,6 +145,23 @@  static void guest_wait_for_irq(void)
 	guest_saw_irq = false;
 }
 
+static unsigned long vmcall_helper(unsigned long reg_a, unsigned long reg_di,
+	unsigned long reg_si)
+{
+	unsigned long ret;
+
+	if (is_amd_cpu())
+		__asm__ __volatile__ ("vmmcall" :
+			"=a" (ret) :
+			"a" (reg_a), "D" (reg_di), "S" (reg_si));
+	else
+		__asm__ __volatile__ ("vmcall" :
+			"=a" (ret) :
+			"a" (reg_a), "D" (reg_di), "S" (reg_si));
+
+	return ret;
+}
+
 static void guest_code(void)
 {
 	struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
@@ -217,12 +234,7 @@  static void guest_code(void)
 	 * EVTCHNOP_send hypercall. */
 	unsigned long rax;
 	struct evtchn_send s = { .port = 127 };
-	__asm__ __volatile__ ("vmcall" :
-			      "=a" (rax) :
-			      "a" (__HYPERVISOR_event_channel_op),
-			      "D" (EVTCHNOP_send),
-			      "S" (&s));
-
+	rax = vmcall_helper(__HYPERVISOR_event_channel_op, EVTCHNOP_send, (unsigned long)&s);
 	GUEST_ASSERT(rax == 0);
 
 	guest_wait_for_irq();
@@ -232,12 +244,7 @@  static void guest_code(void)
 	/* Deliver "outbound" event channel to an eventfd which
 	 * happens to be one of our own irqfds. */
 	s.port = 197;
-	__asm__ __volatile__ ("vmcall" :
-			      "=a" (rax) :
-			      "a" (__HYPERVISOR_event_channel_op),
-			      "D" (EVTCHNOP_send),
-			      "S" (&s));
-
+	rax = vmcall_helper(__HYPERVISOR_event_channel_op, EVTCHNOP_send, (unsigned long)&s);
 	GUEST_ASSERT(rax == 0);
 
 	guest_wait_for_irq();
@@ -245,10 +252,7 @@  static void guest_code(void)
 	GUEST_SYNC(13);
 
 	/* Set a timer 100ms in the future. */
-	__asm__ __volatile__ ("vmcall" :
-			      "=a" (rax) :
-			      "a" (__HYPERVISOR_set_timer_op),
-			      "D" (rs->state_entry_time + 100000000));
+	rax = vmcall_helper(__HYPERVISOR_set_timer_op, (rs->state_entry_time + 100000000), 0);
 	GUEST_ASSERT(rax == 0);
 
 	GUEST_SYNC(14);
@@ -271,36 +275,21 @@  static void guest_code(void)
 		.timeout = 0,
 	};
 
-	__asm__ __volatile__ ("vmcall" :
-			      "=a" (rax) :
-			      "a" (__HYPERVISOR_sched_op),
-			      "D" (SCHEDOP_poll),
-			      "S" (&p));
-
+	rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
 	GUEST_ASSERT(rax == 0);
 
 	GUEST_SYNC(17);
 
 	/* Poll for an unset port and wait for the timeout. */
 	p.timeout = 100000000;
-	__asm__ __volatile__ ("vmcall" :
-			      "=a" (rax) :
-			      "a" (__HYPERVISOR_sched_op),
-			      "D" (SCHEDOP_poll),
-			      "S" (&p));
-
+	rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
 	GUEST_ASSERT(rax == 0);
 
 	GUEST_SYNC(18);
 
 	/* A timer will wake the masked port we're waiting on, while we poll */
 	p.timeout = 0;
-	__asm__ __volatile__ ("vmcall" :
-			      "=a" (rax) :
-			      "a" (__HYPERVISOR_sched_op),
-			      "D" (SCHEDOP_poll),
-			      "S" (&p));
-
+	rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
 	GUEST_ASSERT(rax == 0);
 
 	GUEST_SYNC(19);
@@ -309,12 +298,7 @@  static void guest_code(void)
 	 * actual interrupt, while we're polling on a different port. */
 	ports[0]++;
 	p.timeout = 0;
-	__asm__ __volatile__ ("vmcall" :
-			      "=a" (rax) :
-			      "a" (__HYPERVISOR_sched_op),
-			      "D" (SCHEDOP_poll),
-			      "S" (&p));
-
+	rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
 	GUEST_ASSERT(rax == 0);
 
 	guest_wait_for_irq();
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index b17f36bd41cc..63fca30f085b 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -37,10 +37,16 @@  static void guest_code(void)
 	register unsigned long r9 __asm__("r9") = ARGVALUE(6);
 
 	/* First a direct invocation of 'vmcall' */
-	__asm__ __volatile__("vmcall" :
-			     "=a"(rax) :
-			     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
-			     "r"(r10), "r"(r8), "r"(r9));
+	if (is_amd_cpu())
+		__asm__ __volatile__("vmmcall" :
+			"=a"(rax) :
+			"a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
+			"r"(r10), "r"(r8), "r"(r9));
+	else
+		__asm__ __volatile__("vmcall" :
+			"=a"(rax) :
+			"a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
+			"r"(r10), "r"(r8), "r"(r9));
 	GUEST_ASSERT(rax == RETVALUE);
 
 	/* Fill in the Xen hypercall page */