diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -11,8 +11,13 @@ struct pv_time_ops {
unsigned long long (*steal_clock)(int cpu);
};
+struct pv_lock_ops {
+ bool (*vcpu_is_preempted)(int cpu);
+};
+
struct paravirt_patch_template {
struct pv_time_ops time;
+ struct pv_lock_ops lock;
};
extern struct paravirt_patch_template pv_ops;
@@ -24,6 +29,13 @@ static inline u64 paravirt_steal_clock(int cpu)
int __init pv_time_init(void);
+__visible bool __native_vcpu_is_preempted(int cpu);
+
+static inline bool pv_vcpu_is_preempted(int cpu)
+{
+ return pv_ops.lock.vcpu_is_preempted(cpu);
+}
+
#else
#define pv_time_init() do {} while (0)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -7,8 +7,17 @@
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
+#include <asm/paravirt.h>
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
+#ifdef CONFIG_PARAVIRT
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return pv_vcpu_is_preempted(cpu);
+}
+#endif /* CONFIG_PARAVIRT */
+
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
-obj-$(CONFIG_PARAVIRT) += paravirt.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt-spinlocks.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
diff --git a/arch/arm64/kernel/paravirt-spinlocks.c b/arch/arm64/kernel/paravirt-spinlocks.c
new file mode 100644
--- /dev/null
+++ b/arch/arm64/kernel/paravirt-spinlocks.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2019 Huawei Technologies Co., Ltd
+ * Author: Zengruan Ye <yezengruan@huawei.com>
+ */
+
+#include <linux/spinlock.h>
+#include <asm/paravirt.h>
+
+__visible bool __native_vcpu_is_preempted(int cpu)
+{
+ return false;
+}
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -26,7 +26,9 @@
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
-struct paravirt_patch_template pv_ops;
+struct paravirt_patch_template pv_ops = {
+ .lock.vcpu_is_preempted = __native_vcpu_is_preempted,
+};
EXPORT_SYMBOL_GPL(pv_ops);
struct pv_time_stolen_time_region {
This is to fix some lock holder preemption issues. Some lock
implementations spin in a loop before acquiring the lock itself. The
kernel already provides the interface bool vcpu_is_preempted(int cpu),
which takes a CPU number and returns true if that vCPU has been
preempted; the kernel can then break out of such spin loops based on
its return value. Since the kernel already uses this interface, let's
support it on arm64.

Reported-by: kbuild test robot <lkp@intel.com>
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
---
 arch/arm64/include/asm/paravirt.h      | 12 ++++++++++++
 arch/arm64/include/asm/spinlock.h      |  9 +++++++++
 arch/arm64/kernel/Makefile             |  2 +-
 arch/arm64/kernel/paravirt-spinlocks.c | 13 +++++++++++++
 arch/arm64/kernel/paravirt.c           |  4 +++-
 5 files changed, 38 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm64/kernel/paravirt-spinlocks.c
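For illustration only (not part of this patch): a minimal sketch of how
a lock's spin loop can use vcpu_is_preempted() to stop spinning once the
holder's vCPU has been preempted. struct my_lock, its owner_cpu field,
and spin_for_lock() are hypothetical placeholders, not existing kernel
APIs.

#include <linux/atomic.h>
#include <linux/sched.h>	/* vcpu_is_preempted(), cpu_relax() */

/* Hypothetical lock that records which CPU currently holds it. */
struct my_lock {
	atomic_t locked;
	int owner_cpu;
};

/*
 * Spin while the lock is held; returns true when the lock looks free
 * and the caller should attempt to take it, false when spinning is
 * pointless and the caller should sleep or yield instead.
 */
static bool spin_for_lock(struct my_lock *lock)
{
	while (atomic_read(&lock->locked)) {
		/*
		 * If the holder's vCPU has been preempted by the
		 * hypervisor, further spinning only burns cycles.
		 */
		if (vcpu_is_preempted(READ_ONCE(lock->owner_cpu)))
			return false;

		cpu_relax();
	}
	return true;
}

With the default __native_vcpu_is_preempted() installed above, the check
always returns false and native spinning behaviour is unchanged; a guest
can later point pv_ops.lock.vcpu_is_preempted at a hypervisor-backed
implementation to report real preemption.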