[V10,19/19] locking/qspinlock: riscv: Add Compact NUMA-aware lock support

Message ID 20230802164701.192791-20-guoren@kernel.org (mailing list archive)
State Handled Elsewhere
Headers show
Series riscv: Add Native/Paravirt/CNA qspinlock support | expand

Checks

Context                 | Check | Description
conchuod/tree_selection | fail  | Failed to apply to next/pending-fixes, riscv/for-next or riscv/master

Commit Message

Guo Ren Aug. 2, 2023, 4:47 p.m. UTC
From: Guo Ren <guoren@linux.alibaba.com>

Connect riscv to the Compact NUMA-aware lock (CNA), which uses the
PARAVIRT_SPINLOCKS static_call hooks. See the numa_spinlock= parameter
in Documentation/admin-guide/kernel-parameters.txt for how to try it.
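
For reference, the dispatch goes through a static_call hook that the
earlier patches in this series are expected to define in
arch/riscv/kernel/paravirt.c. A minimal sketch of that hook and the
three slowpaths it can be pointed at (illustrative only; the default
target and its location are assumed from the rest of the series, not
added by this patch):

  #include <linux/static_call.h>
  #include <asm/qspinlock.h>

  /* Assumed default target: the native MCS slowpath. */
  DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath,
                     native_queued_spin_lock_slowpath);

  /*
   * pv_qspinlock_init() later retargets the hook with
   * static_call_update() to exactly one of:
   *   native_queued_spin_lock_slowpath()  - default, plain qspinlock
   *   __pv_queued_spin_lock_slowpath()    - KVM guest with SBI_EXT_PVLOCK
   *   __cna_queued_spin_lock_slowpath()   - bare metal, CNA configured
   */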

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
 arch/riscv/Kconfig                 | 18 ++++++++++++++++++
 arch/riscv/include/asm/qspinlock.h |  5 +++++
 arch/riscv/kernel/paravirt.c       | 12 +++++++++++-
 3 files changed, 34 insertions(+), 1 deletion(-)

Patch

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 13f345b54581..ff483ccd26b9 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -800,6 +800,24 @@  config PARAVIRT_SPINLOCKS
 
 	  If you are unsure how to answer this question, answer Y.
 
+config NUMA_AWARE_SPINLOCKS
+	bool "Numa-aware spinlocks"
+	depends on NUMA
+	depends on QUEUED_SPINLOCKS
+	depends on 64BIT
+	# For now, we depend on PARAVIRT_SPINLOCKS to make the patching work.
+	depends on PARAVIRT_SPINLOCKS
+	default y
+	help
+	  Introduce NUMA (Non Uniform Memory Access) awareness into
+	  the slow path of spinlocks.
+
+	  In this variant of qspinlock, the kernel will try to keep the lock
+	  on the same node, thus reducing the number of remote cache misses,
+	  while trading some of the short term fairness for better performance.
+
+	  Say N if you want absolute first come first serve fairness.
+
 endmenu # "Kernel features"
 
 menu "Boot options"
diff --git a/arch/riscv/include/asm/qspinlock.h b/arch/riscv/include/asm/qspinlock.h
index 003e9560a0d1..e6f2a0621af0 100644
--- a/arch/riscv/include/asm/qspinlock.h
+++ b/arch/riscv/include/asm/qspinlock.h
@@ -12,6 +12,11 @@  void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 void __pv_init_lock_hash(void);
 void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 
+#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
+bool cna_configure_spin_lock_slowpath(void);
+void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#endif
+
 static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	static_call(pv_queued_spin_lock_slowpath)(lock, val);
diff --git a/arch/riscv/kernel/paravirt.c b/arch/riscv/kernel/paravirt.c
index cc80e968ab13..9466f693a98c 100644
--- a/arch/riscv/kernel/paravirt.c
+++ b/arch/riscv/kernel/paravirt.c
@@ -193,8 +193,10 @@  void __init pv_qspinlock_init(void)
 	if (num_possible_cpus() == 1)
 		return;
 
-	if(sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)
+	if(sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM) {
+		goto cna_qspinlock;
 		return;
+	}
 
 	if (!sbi_probe_extension(SBI_EXT_PVLOCK))
 		return;
@@ -204,5 +206,13 @@  void __init pv_qspinlock_init(void)
 
 	static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
 	static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
+	return;
+
+cna_qspinlock:
+#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
+	if (cna_configure_spin_lock_slowpath())
+		static_call_update(pv_queued_spin_lock_slowpath,
+					__cna_queued_spin_lock_slowpath);
+#endif
 }
 #endif
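
Note that cna_configure_spin_lock_slowpath() and
__cna_queued_spin_lock_slowpath() are provided by the generic CNA code
(kernel/locking/qspinlock_cna.h in the CNA series), not by this arch
patch. A rough sketch of how the configure hook and the numa_spinlock=
parameter are expected to fit together; the names, parameter values and
exact policy here are assumptions based on the generic CNA series, not
taken from this patch:

  #include <linux/init.h>
  #include <linux/nodemask.h>
  #include <linux/printk.h>
  #include <linux/string.h>
  #include <linux/types.h>

  /* Assumed semantics: 0 = auto (default), 1 = forced on, -1 = forced off. */
  static int numa_spinlock_flag;

  static int __init numa_spinlock_setup(char *str)
  {
          if (!strcmp(str, "auto")) {
                  numa_spinlock_flag = 0;
                  return 1;
          }
          if (!strcmp(str, "on")) {
                  numa_spinlock_flag = 1;
                  return 1;
          }
          if (!strcmp(str, "off")) {
                  numa_spinlock_flag = -1;
                  return 1;
          }
          return 0;
  }
  __setup("numa_spinlock=", numa_spinlock_setup);

  bool cna_configure_spin_lock_slowpath(void)
  {
          /* Explicitly disabled on the command line. */
          if (numa_spinlock_flag < 0)
                  return false;

          /* In "auto" mode, only multi-node machines benefit from CNA. */
          if (numa_spinlock_flag == 0 && nr_node_ids < 2)
                  return false;

          pr_info("Enabling CNA spinlock\n");
          return true;
  }

The intent, then, is that booting with numa_spinlock=on forces the CNA
slowpath on a non-KVM system, numa_spinlock=off keeps the plain
qspinlock even on multi-node machines, and the default auto mode
enables CNA only when more than one NUMA node is present.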