[2/2] LoongArch: KVM: Add paravirt qspinlock in guest side

Message ID 20240723073825.1811600-3-maobibo@loongson.cn (mailing list archive)
State New, archived
Series LoongArch: KVM: Add paravirt qspinlock support

Commit Message

Bibo Mao July 23, 2024, 7:38 a.m. UTC
Add the PARAVIRT_SPINLOCKS option on LoongArch, along with a
pv_lock_ops template. When PARAVIRT_SPINLOCKS is enabled but the
kernel runs on bare metal, the native ops are used.

Two functions, kvm_wait() and kvm_kick_cpu(), are added specially
for VMs and are installed when the guest detects that the hypervisor
supports pv spinlock. With kvm_wait() the vCPU thread exits to the
hypervisor and gives up scheduling on the pCPU; kvm_kick_cpu() issues
a hypercall asking the hypervisor to wake up the previously waiting
vCPU.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/Kconfig                        | 14 +++
 arch/loongarch/include/asm/Kbuild             |  1 -
 arch/loongarch/include/asm/paravirt.h         | 47 ++++++++++
 arch/loongarch/include/asm/qspinlock.h        | 39 ++++++++
 .../include/asm/qspinlock_paravirt.h          |  6 ++
 arch/loongarch/kernel/paravirt.c              | 88 +++++++++++++++++++
 arch/loongarch/kernel/smp.c                   |  4 +-
 7 files changed, 197 insertions(+), 2 deletions(-)
 create mode 100644 arch/loongarch/include/asm/qspinlock.h
 create mode 100644 arch/loongarch/include/asm/qspinlock_paravirt.h
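
The wait/kick handshake described in the commit message can be modelled in
userspace. Below is a toy single-waiter sketch in which pthreads stand in
for vCPUs and a condition variable stands in for the "idle 0" +
KVM_HCALL_FUNC_KICK pair; pv_wait()/pv_kick() here are userspace stand-ins,
not the kernel functions, and the real protocol lives in
kernel/locking/qspinlock_paravirt.h plus the hooks this patch adds.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SPIN_THRESHOLD (1 << 15)

static atomic_uchar lock_word;			/* 1 = locked, 0 = free */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;

/* Model of kvm_wait(): block only while the watched byte still holds val. */
static void pv_wait(atomic_uchar *ptr, unsigned char val)
{
	pthread_mutex_lock(&m);
	while (atomic_load(ptr) == val)
		pthread_cond_wait(&c, &m);	/* "idle 0" until kicked */
	pthread_mutex_unlock(&m);
}

/* Model of kvm_kick_cpu(): make the halted waiter runnable again. */
static void pv_kick(void)
{
	pthread_mutex_lock(&m);
	pthread_cond_broadcast(&c);		/* KVM_HCALL_FUNC_KICK */
	pthread_mutex_unlock(&m);
}

static void *waiter(void *arg)
{
	/* Spin for a bounded time first, as the slowpath does... */
	for (int i = 0; i < SPIN_THRESHOLD; i++)
		if (!atomic_exchange(&lock_word, 1))
			goto locked;
	/* ...then yield the (v)CPU instead of burning cycles. */
	while (atomic_exchange(&lock_word, 1))
		pv_wait(&lock_word, 1);
locked:
	printf("waiter got the lock\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&lock_word, 1);		/* lock held by "main" */
	pthread_create(&t, NULL, waiter, NULL);
	atomic_store(&lock_word, 0);		/* unlock... */
	pv_kick();				/* ...and kick the waiter */
	pthread_join(&t, NULL);
	return 0;
}

Compile with cc -pthread; the waiter either takes the lock while spinning
or parks in pv_wait() until the unlocker's pv_kick(). Because pv_wait()
rechecks the watched byte under the mutex, a kick cannot be lost.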

Comments

kernel test robot July 23, 2024, 7:57 p.m. UTC | #1
Hi Bibo,

kernel test robot noticed the following build errors:

[auto build test ERROR on 7846b618e0a4c3e08888099d1d4512722b39ca99]

url:    https://github.com/intel-lab-lkp/linux/commits/Bibo-Mao/LoongArch-KVM-Add-paravirt-qspinlock-in-kvm-side/20240723-160536
base:   7846b618e0a4c3e08888099d1d4512722b39ca99
patch link:    https://lore.kernel.org/r/20240723073825.1811600-3-maobibo%40loongson.cn
patch subject: [PATCH 2/2] LoongArch: KVM: Add paravirt qspinlock in guest side
config: loongarch-allmodconfig (https://download.01.org/0day-ci/archive/20240724/202407240320.qqd1uWiE-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 14.1.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240724/202407240320.qqd1uWiE-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202407240320.qqd1uWiE-lkp@intel.com/

All error/warnings (new ones prefixed by >>):

>> arch/loongarch/kernel/paravirt.c:309: warning: expecting prototype for queued_spin_unlock(). Prototype was for native_queued_spin_unlock() instead
--
   In file included from include/linux/atomic.h:80,
                    from include/asm-generic/bitops/atomic.h:5,
                    from arch/loongarch/include/asm/bitops.h:27,
                    from include/linux/bitops.h:63,
                    from include/linux/kernel.h:23,
                    from include/linux/cpumask.h:11,
                    from include/linux/smp.h:13,
                    from kernel/locking/qspinlock.c:16:
   kernel/locking/qspinlock_paravirt.h: In function 'pv_kick_node':
>> include/linux/atomic/atomic-arch-fallback.h:242:34: error: initialization of 'u8 *' {aka 'unsigned char *'} from incompatible pointer type 'enum vcpu_state *' [-Wincompatible-pointer-types]
     242 |         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
         |                                  ^
   include/linux/atomic/atomic-instrumented.h:4908:9: note: in expansion of macro 'raw_try_cmpxchg_relaxed'
    4908 |         raw_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
         |         ^~~~~~~~~~~~~~~~~~~~~~~
   kernel/locking/qspinlock_paravirt.h:377:14: note: in expansion of macro 'try_cmpxchg_relaxed'
     377 |         if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
         |              ^~~~~~~~~~~~~~~~~~~


vim +309 arch/loongarch/kernel/paravirt.c

   303	
   304	/**
   305	 * queued_spin_unlock - release a queued spinlock
   306	 * @lock : Pointer to queued spinlock structure
   307	 */
   308	static void native_queued_spin_unlock(struct qspinlock *lock)
 > 309	{
   310		/*
   311		 * unlock() needs release semantics:
   312		 */
   313		smp_store_release(&lock->locked, 0);
   314	}
   315
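
The kernel-doc warning above is a plain naming mismatch: the comment
documents queued_spin_unlock() while the function it precedes is
native_queued_spin_unlock(). The fix is simply to make the kernel-doc
name match the function:

/**
 * native_queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static void native_queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
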
Bibo Mao July 24, 2024, 1:29 a.m. UTC | #2
On 2024/7/24 3:57 AM, kernel test robot wrote:
> Hi Bibo,
> 
> kernel test robot noticed the following build errors:
Yes, I forgot to mention that this depends on the patch at
https://lore.kernel.org/lkml/20240721164552.50175-1-ubizjak@gmail.com/

Regards
Bibo Mao
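
The error itself comes from the kernel's typeof-based try_cmpxchg()
fallback: pv_node.state is a u8, but pv_kick_node() passes the address of
an enum vcpu_state as the old-value pointer, so the macro's
"typeof(*(_ptr)) *___op = (_oldp)" initialization mixes incompatible
pointer types; the referenced patch resolves this by making the types
agree. A minimal userspace illustration of the pattern (the macro here is
a condensed, non-atomic stand-in for the real fallback):

#include <stdio.h>

typedef unsigned char u8;

enum vcpu_state { vcpu_running, vcpu_halted, vcpu_hashed };

/* Condensed shape of the kernel's typeof-based try_cmpxchg() fallback. */
#define try_cmpxchg_demo(_ptr, _oldp, _new)				\
({									\
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r;		\
	___r = *(_ptr);							\
	if (___r == ___o)						\
		*(_ptr) = (_new);					\
	else								\
		*___op = ___r;						\
	___r == ___o;							\
})

int main(void)
{
	u8 state = vcpu_halted;

	/*
	 * enum vcpu_state old = vcpu_halted;
	 * try_cmpxchg_demo(&state, &old, (u8)vcpu_hashed);
	 *
	 * fails: typeof(*(_ptr)) is u8, so ___op is 'u8 *', but &old is
	 * 'enum vcpu_state *' -- exactly the diagnostic in the report.
	 * Making the old-value variable a u8 keeps the types consistent:
	 */
	u8 old = vcpu_halted;

	if (try_cmpxchg_demo(&state, &old, (u8)vcpu_hashed))
		printf("state is now %u\n", state);
	return 0;
}
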
kernel test robot July 24, 2024, 2:52 a.m. UTC | #3
Hi Bibo,

kernel test robot noticed the following build warnings:

[auto build test WARNING on 7846b618e0a4c3e08888099d1d4512722b39ca99]

url:    https://github.com/intel-lab-lkp/linux/commits/Bibo-Mao/LoongArch-KVM-Add-paravirt-qspinlock-in-kvm-side/20240723-160536
base:   7846b618e0a4c3e08888099d1d4512722b39ca99
patch link:    https://lore.kernel.org/r/20240723073825.1811600-3-maobibo%40loongson.cn
patch subject: [PATCH 2/2] LoongArch: KVM: Add paravirt qspinlock in guest side
config: loongarch-kismet-CONFIG_PARAVIRT-CONFIG_PARAVIRT_SPINLOCKS-0-0 (https://download.01.org/0day-ci/archive/20240724/202407241016.NtaMVEAg-lkp@intel.com/config)
reproduce: (https://download.01.org/0day-ci/archive/20240724/202407241016.NtaMVEAg-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202407241016.NtaMVEAg-lkp@intel.com/

kismet warnings: (new ones prefixed by >>)
>> kismet: WARNING: unmet direct dependencies detected for PARAVIRT when selected by PARAVIRT_SPINLOCKS
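
The kismet warning means that PARAVIRT_SPINLOCKS selects PARAVIRT without
satisfying PARAVIRT's own direct dependencies, so a random config can
enable the pair in configurations where PARAVIRT is not selectable on its
own. One conventional remedy is to depend on PARAVIRT rather than select
it; this is only a sketch, and whether the final series used "depends on"
or instead replicated PARAVIRT's dependency list is not shown in this
thread:

config PARAVIRT_SPINLOCKS
	bool "Paravirtual queued spinlocks"
	depends on PARAVIRT && SMP
	help
	  ...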

Patch

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index b81d0eba5c7e..7ad63db2fafd 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -660,6 +660,20 @@  config PARAVIRT_TIME_ACCOUNTING
 
 	  If in doubt, say N here.
 
+config PARAVIRT_SPINLOCKS
+	bool "Paravirtual queued spinlocks"
+	select PARAVIRT
+	depends on SMP
+	help
+	  Paravirtualized spinlocks allow a pvops backend to replace the
+	  spinlock implementation with something virtualization-friendly
+	  (for example, block the virtual CPU rather than spinning).
+
+	  It has a minimal impact on native kernels and gives a nice performance
+	  benefit on paravirtualized kernels.
+
+	  If you are unsure how to answer this question, answer Y.
+
 endmenu
 
 config ARCH_SELECT_MEMORY_MODEL
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 2bb3676429c0..4635b755b2b4 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -6,7 +6,6 @@  generic-y += mcs_spinlock.h
 generic-y += parport.h
 generic-y += early_ioremap.h
 generic-y += qrwlock.h
-generic-y += qspinlock.h
 generic-y += user.h
 generic-y += ioctl.h
 generic-y += statfs.h
diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
index dddec49671ae..2617d635171b 100644
--- a/arch/loongarch/include/asm/paravirt.h
+++ b/arch/loongarch/include/asm/paravirt.h
@@ -20,6 +20,47 @@  static inline u64 paravirt_steal_clock(int cpu)
 int __init pv_ipi_init(void);
 int __init pv_time_init(void);
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+struct qspinlock;
+struct pv_lock_ops {
+	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+	void (*queued_spin_unlock)(struct qspinlock *lock);
+	void (*wait)(u8 *ptr, u8 val);
+	void (*kick)(int cpu);
+	bool (*vcpu_is_preempted)(int cpu);
+};
+
+extern struct pv_lock_ops pv_lock_ops;
+
+void __init kvm_spinlock_init(void);
+bool pv_is_native_spin_unlock(void);
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+		u32 val)
+{
+	pv_lock_ops.queued_spin_lock_slowpath(lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_lock_ops.queued_spin_unlock(lock);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+	pv_lock_ops.wait(ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+	pv_lock_ops.kick(cpu);
+}
+
+static __always_inline bool pv_vcpu_is_preempted(long cpu)
+{
+	return pv_lock_ops.vcpu_is_preempted(cpu);
+}
+#endif /* PARAVIRT_SPINLOCKS */
 #else
 
 static inline int pv_ipi_init(void)
@@ -32,4 +73,10 @@  static inline int pv_time_init(void)
 	return 0;
 }
 #endif // CONFIG_PARAVIRT
+
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void kvm_spinlock_init(void)
+{
+}
+#endif
 #endif
diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
new file mode 100644
index 000000000000..8e1b14c9e906
--- /dev/null
+++ b/arch/loongarch/include/asm/qspinlock.h
@@ -0,0 +1,39 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_QSPINLOCK_H
+#define _ASM_LOONGARCH_QSPINLOCK_H
+
+#include <asm/paravirt.h>
+
+#define _Q_PENDING_LOOPS       (1 << 9)
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD  (1 << 15)
+
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+extern bool nopvspin;
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	pv_queued_spin_lock_slowpath(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_queued_spin_unlock(lock);
+}
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+	return pv_vcpu_is_preempted(cpu);
+}
+#endif
+
+#include <asm-generic/qspinlock.h>
+
+#endif // _ASM_LOONGARCH_QSPINLOCK_H
diff --git a/arch/loongarch/include/asm/qspinlock_paravirt.h b/arch/loongarch/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..d6d7f487daea
--- /dev/null
+++ b/arch/loongarch/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,6 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_QSPINLOCK_PARAVIRT_H
+#define __ASM_QSPINLOCK_PARAVIRT_H
+
+void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
+#endif
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index aee44610007d..758039eabdde 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -298,3 +298,91 @@  int __init pv_time_init(void)
 
 	return 0;
 }
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+static bool native_vcpu_is_preempted(int cpu)
+{
+	return false;
+}
+
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	/*
+	 * unlock() needs release semantics:
+	 */
+	smp_store_release(&lock->locked, 0);
+}
+
+static void paravirt_nop_kick(int cpu)
+{
+}
+
+static void paravirt_nop_wait(u8 *ptr, u8 val)
+{
+}
+
+static void kvm_wait(u8 *ptr, u8 val)
+{
+	if (READ_ONCE(*ptr) != val)
+		return;
+
+	__asm__ __volatile__("idle 0\n\t" : : : "memory");
+}
+
+/* Kick a cpu. Used to wake up a halted vcpu */
+static void kvm_kick_cpu(int cpu)
+{
+	kvm_hypercall1(KVM_HCALL_FUNC_KICK, cpu_logical_map(cpu));
+}
+
+bool pv_is_native_spin_unlock(void)
+{
+	return pv_lock_ops.queued_spin_unlock == native_queued_spin_unlock;
+}
+
+/*
+ * Set up pv_lock_ops for the guest kernel.
+ */
+void __init kvm_spinlock_init(void)
+{
+	int feature;
+
+	/*
+	 * pv_hash()/pv_unhash() need it whether pv spinlock is
+	 * enabled or not
+	 */
+	__pv_init_lock_hash();
+
+	if (!kvm_para_available())
+		return;
+
+	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
+	if (num_possible_cpus() == 1)
+		return;
+
+	feature = kvm_arch_para_features();
+	if (!(feature & KVM_FEATURE_PARAVIRT_SPINLOCK))
+		return;
+
+	if (nopvspin)
+		return;
+
+	pr_info("Using paravirt qspinlock\n");
+	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_lock_ops.queued_spin_unlock = __pv_queued_spin_unlock;
+	pv_lock_ops.wait = kvm_wait;
+	pv_lock_ops.kick = kvm_kick_cpu;
+}
+
+struct pv_lock_ops pv_lock_ops = {
+	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+	.queued_spin_unlock = native_queued_spin_unlock,
+	.wait = paravirt_nop_wait,
+	.kick = paravirt_nop_kick,
+	.vcpu_is_preempted = native_vcpu_is_preempted,
+};
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 1436d2465939..6bc0b182a2ce 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -463,7 +463,7 @@  core_initcall(ipi_pm_init);
 #endif
 
 /* Preload SMP state for boot cpu */
-void smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
 {
 	unsigned int cpu, node, rr_node;
 
@@ -496,6 +496,8 @@  void smp_prepare_boot_cpu(void)
 			rr_node = next_node_in(rr_node, node_online_map);
 		}
 	}
+
+	kvm_spinlock_init();
 }
 
 /* called from main before smp_init() */