diff mbox series

[kvm-unit-tests,v8,02/12] x86: Move architectural code to lib/x86

Message ID 20240612144539.16147-3-vsntk18@gmail.com (mailing list archive)
State New, archived
Headers show
Series Add #VC exception handling for AMD SEV-ES | expand

Commit Message

Vasant Karasulli June 12, 2024, 2:45 p.m. UTC
From: Vasant Karasulli <vkarasulli@suse.de>

Move x86/svm.h to lib/x86/svm.h and split the test-framework-specific
definitions (struct svm_test, test helpers, and the VMRUN assembly
macros) into a new x86/svm_tests.h. This enables sharing the common
architectural SVM definitions across testcases and lib/.

Signed-off-by: Vasant Karasulli <vkarasulli@suse.de>
---
 {x86 => lib/x86}/svm.h | 105 --------------------------------------
 x86/svm.c              |   2 +-
 x86/svm_npt.c          |   2 +-
 x86/svm_tests.c        |   2 +-
 x86/svm_tests.h        | 113 +++++++++++++++++++++++++++++++++++++++++
 5 files changed, 116 insertions(+), 108 deletions(-)
 rename {x86 => lib/x86}/svm.h (76%)
 create mode 100644 x86/svm_tests.h

--
2.34.1
diff mbox series

Patch

diff --git a/x86/svm.h b/lib/x86/svm.h
similarity index 76%
rename from x86/svm.h
rename to lib/x86/svm.h
index 308daa55..0fc64be7 100644
--- a/x86/svm.h
+++ b/lib/x86/svm.h
@@ -372,22 +372,6 @@  struct __attribute__ ((__packed__)) vmcb {

 #define LBR_CTL_ENABLE_MASK BIT_ULL(0)

-struct svm_test {
-	const char *name;
-	bool (*supported)(void);
-	void (*prepare)(struct svm_test *test);
-	void (*prepare_gif_clear)(struct svm_test *test);
-	void (*guest_func)(struct svm_test *test);
-	bool (*finished)(struct svm_test *test);
-	bool (*succeeded)(struct svm_test *test);
-	int exits;
-	ulong scratch;
-	/* Alternative test interface. */
-	void (*v2)(void);
-	int on_vcpu;
-	bool on_vcpu_done;
-};
-
 struct regs {
 	u64 rax;
 	u64 rbx;
@@ -408,93 +392,4 @@  struct regs {
 	u64 rflags;
 };

-typedef void (*test_guest_func)(struct svm_test *);
-
-int run_svm_tests(int ac, char **av, struct svm_test *svm_tests);
-u64 *npt_get_pte(u64 address);
-u64 *npt_get_pde(u64 address);
-u64 *npt_get_pdpe(u64 address);
-u64 *npt_get_pml4e(void);
-bool smp_supported(void);
-bool default_supported(void);
-bool vgif_supported(void);
-bool lbrv_supported(void);
-bool tsc_scale_supported(void);
-bool pause_filter_supported(void);
-bool pause_threshold_supported(void);
-void default_prepare(struct svm_test *test);
-void default_prepare_gif_clear(struct svm_test *test);
-bool default_finished(struct svm_test *test);
-bool npt_supported(void);
-bool vnmi_supported(void);
-int get_test_stage(struct svm_test *test);
-void set_test_stage(struct svm_test *test, int s);
-void inc_test_stage(struct svm_test *test);
-void vmcb_ident(struct vmcb *vmcb);
-struct regs get_regs(void);
-void vmmcall(void);
-void svm_setup_vmrun(u64 rip);
-int __svm_vmrun(u64 rip);
-int svm_vmrun(void);
-void test_set_guest(test_guest_func func);
-
-extern struct vmcb *vmcb;
-
-static inline void stgi(void)
-{
-    asm volatile ("stgi");
-}
-
-static inline void clgi(void)
-{
-    asm volatile ("clgi");
-}
-
-
-
-#define SAVE_GPR_C                              \
-        "xchg %%rbx, regs+0x8\n\t"              \
-        "xchg %%rcx, regs+0x10\n\t"             \
-        "xchg %%rdx, regs+0x18\n\t"             \
-        "xchg %%rbp, regs+0x28\n\t"             \
-        "xchg %%rsi, regs+0x30\n\t"             \
-        "xchg %%rdi, regs+0x38\n\t"             \
-        "xchg %%r8, regs+0x40\n\t"              \
-        "xchg %%r9, regs+0x48\n\t"              \
-        "xchg %%r10, regs+0x50\n\t"             \
-        "xchg %%r11, regs+0x58\n\t"             \
-        "xchg %%r12, regs+0x60\n\t"             \
-        "xchg %%r13, regs+0x68\n\t"             \
-        "xchg %%r14, regs+0x70\n\t"             \
-        "xchg %%r15, regs+0x78\n\t"
-
-#define LOAD_GPR_C      SAVE_GPR_C
-
-#define ASM_PRE_VMRUN_CMD                       \
-                "vmload %%rax\n\t"              \
-                "mov regs+0x80, %%r15\n\t"      \
-                "mov %%r15, 0x170(%%rax)\n\t"   \
-                "mov regs, %%r15\n\t"           \
-                "mov %%r15, 0x1f8(%%rax)\n\t"   \
-                LOAD_GPR_C                      \
-
-#define ASM_POST_VMRUN_CMD                      \
-                SAVE_GPR_C                      \
-                "mov 0x170(%%rax), %%r15\n\t"   \
-                "mov %%r15, regs+0x80\n\t"      \
-                "mov 0x1f8(%%rax), %%r15\n\t"   \
-                "mov %%r15, regs\n\t"           \
-                "vmsave %%rax\n\t"              \
-
-
-
-#define SVM_BARE_VMRUN \
-	asm volatile ( \
-		ASM_PRE_VMRUN_CMD \
-                "vmrun %%rax\n\t"               \
-		ASM_POST_VMRUN_CMD \
-		: \
-		: "a" (virt_to_phys(vmcb)) \
-		: "memory", "r15") \
-
 #endif
diff --git a/x86/svm.c b/x86/svm.c
index e715e270..251e9ed6 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -2,7 +2,7 @@ 
  * Framework for testing nested virtualization
  */

-#include "svm.h"
+#include "svm_tests.h"
 #include "libcflat.h"
 #include "processor.h"
 #include "desc.h"
diff --git a/x86/svm_npt.c b/x86/svm_npt.c
index b791f1ac..c248a66f 100644
--- a/x86/svm_npt.c
+++ b/x86/svm_npt.c
@@ -1,4 +1,4 @@ 
-#include "svm.h"
+#include "svm_tests.h"
 #include "vm.h"
 #include "alloc_page.h"
 #include "vmalloc.h"
diff --git a/x86/svm_tests.c b/x86/svm_tests.c
index c81b7465..0f206632 100644
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -1,4 +1,4 @@ 
-#include "svm.h"
+#include "svm_tests.h"
 #include "libcflat.h"
 #include "processor.h"
 #include "desc.h"
diff --git a/x86/svm_tests.h b/x86/svm_tests.h
new file mode 100644
index 00000000..fcf3bcb5
--- /dev/null
+++ b/x86/svm_tests.h
@@ -0,0 +1,107 @@ 
+#ifndef X86_SVM_TESTS_H
+#define X86_SVM_TESTS_H
+
+#include "x86/svm.h"
+
+struct svm_test {
+	const char *name;
+	bool (*supported)(void);
+	void (*prepare)(struct svm_test *test);
+	void (*prepare_gif_clear)(struct svm_test *test);
+	void (*guest_func)(struct svm_test *test);
+	bool (*finished)(struct svm_test *test);
+	bool (*succeeded)(struct svm_test *test);
+	int exits;
+	ulong scratch;
+	/* Alternative test interface. */
+	void (*v2)(void);
+	int on_vcpu;
+	bool on_vcpu_done;
+};
+
+typedef void (*test_guest_func)(struct svm_test *);
+
+int run_svm_tests(int ac, char **av, struct svm_test *svm_tests);
+u64 *npt_get_pte(u64 address);
+u64 *npt_get_pde(u64 address);
+u64 *npt_get_pdpe(u64 address);
+u64 *npt_get_pml4e(void);
+bool smp_supported(void);
+bool default_supported(void);
+bool vgif_supported(void);
+bool lbrv_supported(void);
+bool tsc_scale_supported(void);
+bool pause_filter_supported(void);
+bool pause_threshold_supported(void);
+void default_prepare(struct svm_test *test);
+void default_prepare_gif_clear(struct svm_test *test);
+bool default_finished(struct svm_test *test);
+bool npt_supported(void);
+bool vnmi_supported(void);
+int get_test_stage(struct svm_test *test);
+void set_test_stage(struct svm_test *test, int s);
+void inc_test_stage(struct svm_test *test);
+void vmcb_ident(struct vmcb *vmcb);
+struct regs get_regs(void);
+void vmmcall(void);
+void svm_setup_vmrun(u64 rip);
+int __svm_vmrun(u64 rip);
+int svm_vmrun(void);
+void test_set_guest(test_guest_func func);
+
+extern struct vmcb *vmcb;
+
+static inline void stgi(void)
+{
+    asm volatile ("stgi");
+}
+
+static inline void clgi(void)
+{
+    asm volatile ("clgi");
+}
+
+#define SAVE_GPR_C                              \
+        "xchg %%rbx, regs+0x8\n\t"              \
+        "xchg %%rcx, regs+0x10\n\t"             \
+        "xchg %%rdx, regs+0x18\n\t"             \
+        "xchg %%rbp, regs+0x28\n\t"             \
+        "xchg %%rsi, regs+0x30\n\t"             \
+        "xchg %%rdi, regs+0x38\n\t"             \
+        "xchg %%r8, regs+0x40\n\t"              \
+        "xchg %%r9, regs+0x48\n\t"              \
+        "xchg %%r10, regs+0x50\n\t"             \
+        "xchg %%r11, regs+0x58\n\t"             \
+        "xchg %%r12, regs+0x60\n\t"             \
+        "xchg %%r13, regs+0x68\n\t"             \
+        "xchg %%r14, regs+0x70\n\t"             \
+        "xchg %%r15, regs+0x78\n\t"
+
+#define LOAD_GPR_C      SAVE_GPR_C
+
+#define ASM_PRE_VMRUN_CMD                       \
+                "vmload %%rax\n\t"              \
+                "mov regs+0x80, %%r15\n\t"      \
+                "mov %%r15, 0x170(%%rax)\n\t"   \
+                "mov regs, %%r15\n\t"           \
+                "mov %%r15, 0x1f8(%%rax)\n\t"   \
+                LOAD_GPR_C                      \
+
+#define ASM_POST_VMRUN_CMD                      \
+                SAVE_GPR_C                      \
+                "mov 0x170(%%rax), %%r15\n\t"   \
+                "mov %%r15, regs+0x80\n\t"      \
+                "mov 0x1f8(%%rax), %%r15\n\t"   \
+                "mov %%r15, regs\n\t"           \
+                "vmsave %%rax\n\t"              \
+
+#define SVM_BARE_VMRUN \
+	asm volatile ( \
+		ASM_PRE_VMRUN_CMD \
+                "vmrun %%rax\n\t"               \
+		ASM_POST_VMRUN_CMD \
+		: \
+		: "a" (virt_to_phys(vmcb)) \
+		: "memory", "r15") \
+
+#endif