@@ -7,12 +7,22 @@ struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
+struct pvstate_vcpu_info {
+ bool preempted;
+ u8 reserved[63];
+};
+
+struct pv_state_ops {
+ bool (*vcpu_is_preempted)(int cpu);
+};
+
struct pv_time_ops {
unsigned long long (*steal_clock)(int cpu);
};
struct paravirt_patch_template {
struct pv_time_ops time;
+ struct pv_state_ops state;
};
extern struct paravirt_patch_template pv_ops;
@@ -22,10 +32,15 @@ static inline u64 paravirt_steal_clock(int cpu)
return pv_ops.time.steal_clock(cpu);
}
+bool native_vcpu_is_preempted(int cpu);
+bool paravirt_vcpu_is_preempted(int cpu);
+
+int __init pv_state_init(void);
int __init pv_time_init(void);
#else
+#define pv_state_init() do {} while (0)
#define pv_time_init() do {} while (0)
#endif // CONFIG_PARAVIRT
@@ -48,7 +48,7 @@ obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
-obj-$(CONFIG_PARAVIRT) += paravirt.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt-state.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
new file mode 100644
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "arm-pvstate: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/jump_label.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/paravirt.h>
+#include <asm/smp_plat.h>
+
+static DEFINE_PER_CPU(struct pvstate_vcpu_info, vcpus_states);
+
+bool native_vcpu_is_preempted(int cpu)
+{
+ return false;
+}
+
+static bool pv_vcpu_is_preempted(int cpu)
+{
+ struct pvstate_vcpu_info *st;
+
+ st = &per_cpu(vcpus_states, cpu);
+ return READ_ONCE(st->preempted);
+}
+
+bool paravirt_vcpu_is_preempted(int cpu)
+{
+ return pv_ops.state.vcpu_is_preempted(cpu);
+}
+
+static bool has_pvstate(void)
+{
+ struct arm_smccc_res res;
+
+ /* To detect the presence of PV state support we require SMCCC 1.1+ */
+ if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
+ return false;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_HV_PV_STATE_FEATURES,
+ &res);
+
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return false;
+ return true;
+}
+
+static int __pvstate_cpu_hook(unsigned int cpu, int event)
+{
+ struct arm_smccc_res res;
+ struct pvstate_vcpu_info *st;
+
+ st = &per_cpu(vcpus_states, cpu);
+ arm_smccc_1_1_invoke(event, virt_to_phys(st), &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EINVAL;
+ return 0;
+}
+
+static int pvstate_cpu_init(unsigned int cpu)
+{
+ int ret = __pvstate_cpu_hook(cpu, ARM_SMCCC_HV_PV_STATE_INIT);
+
+ if (ret)
+ pr_warn("Unable to ARM_SMCCC_HV_PV_STATE_INIT\n");
+ return ret;
+}
+
+static int pvstate_cpu_release(unsigned int cpu)
+{
+ int ret = __pvstate_cpu_hook(cpu, ARM_SMCCC_HV_PV_STATE_RELEASE);
+
+ if (ret)
+ pr_warn("Unable to ARM_SMCCC_HV_PV_STATE_RELEASE\n");
+ return ret;
+}
+
+static int pvstate_register_hooks(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
+ "hypervisor/arm/pvstate:starting",
+ pvstate_cpu_init,
+ pvstate_cpu_release);
+ if (ret < 0)
+ pr_warn("Failed to register CPU hooks\n");
+ return ret;
+}
+
+static int __pvstate_init(void)
+{
+ return pvstate_register_hooks();
+}
+
+int __init pv_state_init(void)
+{
+ int ret;
+
+ if (!has_pvstate())
+ return 0;
+
+ ret = __pvstate_init();
+ if (ret)
+ return ret;
+
+ pv_ops.state.vcpu_is_preempted = pv_vcpu_is_preempted;
+ return 0;
+}
@@ -26,7 +26,9 @@
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
-struct paravirt_patch_template pv_ops;
+struct paravirt_patch_template pv_ops = {
+ .state.vcpu_is_preempted = native_vcpu_is_preempted,
+};
EXPORT_SYMBOL_GPL(pv_ops);
struct pv_time_stolen_time_region {
@@ -68,4 +68,5 @@ void __init time_init(void)
lpj_fine = arch_timer_rate / HZ;
pv_time_init();
+ pv_state_init();
}
PV-state is a per-CPU struct which, for the time being, holds the boolean `preempted' vCPU state. During startup, provided that the host supports PV-state, each guest vCPU sends a pointer to its per-CPU variable to the host as a payload of an SMCCC HV call, so that the host can update the vCPU state when it puts or loads the vCPU. This has an impact on the guest's scheduler, which checks the state of the vCPU it wants to run a task on: [..] wake_up_process() try_to_wake_up() select_task_rq_fair() available_idle_cpu() vcpu_is_preempted() Some sched benchmark data is available on the GitHub page [0]. [0] https://github.com/sergey-senozhatsky/arm64-vcpu_is_preempted Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> --- arch/arm64/include/asm/paravirt.h | 15 ++++ arch/arm64/kernel/Makefile | 2 +- arch/arm64/kernel/paravirt-state.c | 117 +++++++++++++++++++++++++++++ arch/arm64/kernel/paravirt.c | 4 +- arch/arm64/kernel/time.c | 1 + 5 files changed, 137 insertions(+), 2 deletions(-) create mode 100644 arch/arm64/kernel/paravirt-state.c