@@ -53,9 +53,18 @@ struct arch_vpmu_ops {
#endif
};
+#ifdef CONFIG_INTEL_VMX
const struct arch_vpmu_ops *core2_vpmu_init(void);
+#else
+static inline const struct arch_vpmu_ops *core2_vpmu_init(void) { return NULL; }
+#endif /* CONFIG_INTEL_VMX */
+#ifdef CONFIG_AMD_SVM
const struct arch_vpmu_ops *amd_vpmu_init(void);
const struct arch_vpmu_ops *hygon_vpmu_init(void);
+#else
+static inline const struct arch_vpmu_ops *amd_vpmu_init(void) { return NULL; }
+static inline const struct arch_vpmu_ops *hygon_vpmu_init(void) { return NULL; }
+#endif /* CONFIG_AMD_SVM */
struct vpmu_struct {
u32 flags;
The function core2_vpmu_init() is VT-x specific while the functions amd_vpmu_init() and hygon_vpmu_init() are AMD-V specific, thus they need to be guarded with INTEL_VMX and AMD_SVM, respectively. Instead of adding #ifdef guards around the function calls in common vpmu code, implement them as static inline null-returning functions when the respective technology is not enabled. No functional change intended. Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com> --- xen/arch/x86/include/asm/vpmu.h | 9 +++++++++ 1 file changed, 9 insertions(+)