
[v3,01/14] x86/svm: move declarations used only by svm code from svm.h to private header

Message ID 20230224185010.3692754-2-burzalodowa@gmail.com (mailing list archive)
State New, archived
Series x86/hvm: {svm,vmx} {c,h} cleanup

Commit Message

Xenia Ragiadakou Feb. 24, 2023, 6:49 p.m. UTC
Create a new private header in arch/x86/hvm/svm called svm.h and move there
all definitions and declarations that are used solely by svm code.

Take the opportunity to remove the forward declaration of struct vcpu, which
is a leftover from the removal of svm_update_guest_cr()'s declaration.

Take the opportunity to re-arrange the header as follows: all structures
first, then all variable declarations, then all function declarations, and
finally all inline functions.

No functional change intended.

Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
---

Changes in v3:
  - add SPDX identifier in priv header, reported by Andrew
  - add #ifndef header guard, reported by Andrew and Jan
  - move svm_invlpga() as well, it was not called anyway (usage sketch below)
  - fold patch removing redundant forward declaration of struct vcpu into
    this patch, suggested by Andrew
  - rearrange the header in the way Jan proposed
  - update commit message
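
A usage sketch, for reference only (not part of this patch; the function names
and the context-switch flow below are made up for illustration): code under
xen/arch/x86/hvm/svm/ reaches the now-private helpers by including the new
header.

/* Illustrative only: pretend host/guest VMCB physical addresses are at hand. */
#include <xen/types.h>

#include "svm.h"                   /* the private header added by this patch */

static void example_sync_vmcb(paddr_t host_vmcb_pa, paddr_t guest_vmcb_pa)
{
    svm_vmsave_pa(host_vmcb_pa);   /* VMSAVE: stash additional host state in the host VMCB */
    svm_vmload_pa(guest_vmcb_pa);  /* VMLOAD: load the corresponding guest state */
}

static void example_flush_guest_va(unsigned long linear, uint32_t asid)
{
    svm_invlpga(linear, asid);     /* INVLPGA: flush the TLB entry for @linear in @asid */
}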

 xen/arch/x86/hvm/svm/nestedsvm.c       |  1 +
 xen/arch/x86/hvm/svm/svm.c             |  2 +
 xen/arch/x86/hvm/svm/svm.h             | 62 ++++++++++++++++++++++++++
 xen/arch/x86/include/asm/hvm/svm/svm.h | 41 -----------------
 4 files changed, 65 insertions(+), 41 deletions(-)
 create mode 100644 xen/arch/x86/hvm/svm/svm.h

Patch

diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 77f7547360..a341ccc876 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -27,6 +27,7 @@ 
 #include <asm/event.h> /* for local_event_delivery_(en|dis)able */
 #include <asm/p2m.h> /* p2m_get_pagetable, p2m_get_nestedp2m */
 
+#include "svm.h"
 
 #define NSVM_ERROR_VVMCB        1
 #define NSVM_ERROR_VMENTRY      2
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 9c43227b76..6d394e4fe3 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -55,6 +55,8 @@ 
 
 #include <public/sched.h>
 
+#include "svm.h"
+
 void noreturn svm_asm_do_resume(void);
 
 u32 svm_feature_flags;
diff --git a/xen/arch/x86/hvm/svm/svm.h b/xen/arch/x86/hvm/svm/svm.h
new file mode 100644
index 0000000000..9e65919757
--- /dev/null
+++ b/xen/arch/x86/hvm/svm/svm.h
@@ -0,0 +1,62 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * svm.h: SVM Architecture related definitions
+ *
+ * Copyright (c) 2005, AMD Corporation.
+ * Copyright (c) 2004, Intel Corporation.
+ */
+
+#ifndef __X86_HVM_SVM_SVM_PRIV_H__
+#define __X86_HVM_SVM_SVM_PRIV_H__
+
+#include <xen/types.h>
+
+struct cpu_user_regs;
+
+unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
+void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
+
+static inline void svm_vmload_pa(paddr_t vmcb)
+{
+    asm volatile (
+        ".byte 0x0f,0x01,0xda" /* vmload */
+        : : "a" (vmcb) : "memory" );
+}
+
+static inline void svm_vmsave_pa(paddr_t vmcb)
+{
+    asm volatile (
+        ".byte 0x0f,0x01,0xdb" /* vmsave */
+        : : "a" (vmcb) : "memory" );
+}
+
+static inline void svm_invlpga(unsigned long linear, uint32_t asid)
+{
+    asm volatile (
+        ".byte 0x0f,0x01,0xdf"
+        : /* output */
+        : /* input */
+        "a" (linear), "c" (asid));
+}
+
+/* TSC rate */
+#define DEFAULT_TSC_RATIO       0x0000000100000000ULL
+#define TSC_RATIO_RSVD_BITS     0xffffff0000000000ULL
+
+/* EXITINFO1 fields on NPT faults */
+#define _NPT_PFEC_with_gla     32
+#define NPT_PFEC_with_gla      (1UL<<_NPT_PFEC_with_gla)
+#define _NPT_PFEC_in_gpt       33
+#define NPT_PFEC_in_gpt        (1UL<<_NPT_PFEC_in_gpt)
+
+#endif /* __X86_HVM_SVM_SVM_PRIV_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/include/asm/hvm/svm/svm.h b/xen/arch/x86/include/asm/hvm/svm/svm.h
index cf9ed517d5..7d5de0122a 100644
--- a/xen/arch/x86/include/asm/hvm/svm/svm.h
+++ b/xen/arch/x86/include/asm/hvm/svm/svm.h
@@ -20,37 +20,6 @@ 
 #ifndef __ASM_X86_HVM_SVM_H__
 #define __ASM_X86_HVM_SVM_H__
 
-#include <xen/types.h>
-
-static inline void svm_vmload_pa(paddr_t vmcb)
-{
-    asm volatile (
-        ".byte 0x0f,0x01,0xda" /* vmload */
-        : : "a" (vmcb) : "memory" );
-}
-
-static inline void svm_vmsave_pa(paddr_t vmcb)
-{
-    asm volatile (
-        ".byte 0x0f,0x01,0xdb" /* vmsave */
-        : : "a" (vmcb) : "memory" );
-}
-
-static inline void svm_invlpga(unsigned long linear, uint32_t asid)
-{
-    asm volatile (
-        ".byte 0x0f,0x01,0xdf"
-        : /* output */
-        : /* input */
-        "a" (linear), "c" (asid));
-}
-
-struct cpu_user_regs;
-struct vcpu;
-
-unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
-void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
-
 /*
  * PV context switch helpers.  Prefetching the VMCB area itself has been shown
  * to be useful for performance.
@@ -96,14 +65,4 @@  extern u32 svm_feature_flags;
 #define cpu_has_svm_sss       cpu_has_svm_feature(SVM_FEATURE_SSS)
 #define cpu_has_svm_spec_ctrl cpu_has_svm_feature(SVM_FEATURE_SPEC_CTRL)
 
-/* TSC rate */
-#define DEFAULT_TSC_RATIO       0x0000000100000000ULL
-#define TSC_RATIO_RSVD_BITS     0xffffff0000000000ULL
-
-/* EXITINFO1 fields on NPT faults */
-#define _NPT_PFEC_with_gla     32
-#define NPT_PFEC_with_gla      (1UL<<_NPT_PFEC_with_gla)
-#define _NPT_PFEC_in_gpt       33
-#define NPT_PFEC_in_gpt        (1UL<<_NPT_PFEC_in_gpt)
-
 #endif /* __ASM_X86_HVM_SVM_H__ */
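
A closing note, for reference only (not part of the patch): the EXITINFO1 bit
definitions that this patch moves into the private header are meant to be
tested against the exit information of a nested page fault. The function and
variable names below are illustrative, not the actual Xen handler.

/* Hypothetical sketch: decode EXITINFO1 bits on an NPT (nested page table) fault. */
static void example_decode_npt_exitinfo(uint64_t exitinfo1)
{
    bool gla_valid = !!(exitinfo1 & NPT_PFEC_with_gla); /* a guest linear address was provided */
    bool in_gpt    = !!(exitinfo1 & NPT_PFEC_in_gpt);   /* fault happened during a guest page table walk */

    /* ... hand off to the p2m/paging code based on these bits ... */
}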