@@ -564,6 +564,15 @@ config CPU_HAS_PREFETCH
bool
default y
+config PARAVIRT
+ bool "Enable paravirtualization code"
+ depends on AS_HAS_LVZ_EXTENSION
+ help
+ This changes the kernel so it can modify itself when it is run
+ under a hypervisor, potentially improving performance significantly
+ over full virtualization. However, when run without a hypervisor
+ the kernel is theoretically slower and slightly larger.
+
config ARCH_SUPPORTS_KEXEC
def_bool y
@@ -2,6 +2,13 @@
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H
+/*
+ * Hypcall code field
+ */
+#define HYPERVISOR_KVM 1
+#define HYPERVISOR_VENDOR_SHIFT 8
+#define HYPERCALL_CODE(vendor, code) (((vendor) << HYPERVISOR_VENDOR_SHIFT) + (code))
+
/*
* LoongArch hypcall return code
*/
new file mode 100644
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_PARAVIRT_H
+#define _ASM_LOONGARCH_PARAVIRT_H
+
+#ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+u64 dummy_steal_clock(int cpu);
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return static_call(pv_steal_clock)(cpu);
+}
+
+int pv_guest_init(void);
+#else
+static inline int pv_guest_init(void)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PARAVIRT */
+#endif
new file mode 100644
@@ -0,0 +1 @@
+#include <asm/paravirt.h>
@@ -48,6 +48,7 @@ obj-$(CONFIG_MODULES) += module.o module-sections.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o
obj-$(CONFIG_SMP) += smp.o
new file mode 100644
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/jump_label.h>
+#include <linux/kvm_para.h>
+#include <linux/static_call.h>
+#include <asm/paravirt.h>
+
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
+
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+
+static bool kvm_para_available(void)
+{
+ static int hypervisor_type;
+ int config;
+
+ if (!hypervisor_type) {
+ config = read_cpucfg(CPUCFG_KVM_SIG);
+ if (!memcmp(&config, KVM_SIGNATURE, 4))
+ hypervisor_type = HYPERVISOR_KVM;
+ }
+
+ return hypervisor_type == HYPERVISOR_KVM;
+}
+
+int __init pv_guest_init(void)
+{
+ if (!cpu_has_hypervisor)
+ return 0;
+ if (!kvm_para_available())
+ return 0;
+
+ return 1;
+}
@@ -43,6 +43,7 @@
#include <asm/efi.h>
#include <asm/loongson.h>
#include <asm/numa.h>
+#include <asm/paravirt.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -376,6 +377,7 @@ void __init platform_init(void)
pr_info("The BIOS Version: %s\n", b_info.bios_version);
efi_runtime_init();
+ pv_guest_init();
}
static void __init check_kernel_sections_mem(void)
The patch adds a paravirt interface for the guest kernel. The function
pv_guest_init() first checks whether the kernel is running in VM mode; if
so, it calls kvm_para_available() to detect whether the current VMM is the
KVM hypervisor. The paravirt functions only take effect when the VMM is
KVM, and KVM is the only hypervisor supported on LoongArch at present.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/Kconfig                           |  9 ++++
 arch/loongarch/include/asm/kvm_para.h            |  7 ++++
 arch/loongarch/include/asm/paravirt.h            | 27 ++++++++++++
 .../include/asm/paravirt_api_clock.h             |  1 +
 arch/loongarch/kernel/Makefile                   |  1 +
 arch/loongarch/kernel/paravirt.c                 | 41 +++++++++++++++++++
 arch/loongarch/kernel/setup.c                    |  2 +
 7 files changed, 88 insertions(+)
 create mode 100644 arch/loongarch/include/asm/paravirt.h
 create mode 100644 arch/loongarch/include/asm/paravirt_api_clock.h
 create mode 100644 arch/loongarch/kernel/paravirt.c
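
For reviewers, here is a minimal sketch (not part of this patch) of how a
follow-up change could plug a KVM steal-clock provider into the interface
added here. The names kvm_steal_clock() and pv_time_init() are hypothetical
placeholders, and the per-cpu steal-time source shared with the hypervisor
is left as a stub:

	#include <linux/init.h>
	#include <linux/types.h>
	#include <linux/jump_label.h>
	#include <linux/static_call.h>
	#include <asm/paravirt.h>

	/* Hypothetical: would read per-cpu steal time shared by the hypervisor. */
	static u64 kvm_steal_clock(int cpu)
	{
		return 0;
	}

	static void __init pv_time_init(void)
	{
		/* Route paravirt_steal_clock() to the KVM implementation. */
		static_call_update(pv_steal_clock, kvm_steal_clock);

		/* Let the scheduler account steal time from now on. */
		static_key_slow_inc(&paravirt_steal_enabled);
		if (IS_ENABLED(CONFIG_PARAVIRT_TIME_ACCOUNTING))
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

Such a pv_time_init() would be called from pv_guest_init() only after
kvm_para_available() has confirmed the KVM hypervisor, so the static call
and static keys keep their native defaults on bare metal.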