
[V11,11/17] RISC-V: paravirt: pvqspinlock: Add paravirt qspinlock skeleton

Message ID 20230910082911.3378782-12-guoren@kernel.org (mailing list archive)
State Changes Requested
Series riscv: Add Native/Paravirt qspinlock support

Checks

Context                  Check  Description
conchuod/tree_selection  fail   Failed to apply to next/pending-fixes, riscv/for-next or riscv/master

Commit Message

Guo Ren Sept. 10, 2023, 8:29 a.m. UTC
From: Guo Ren <guoren@linux.alibaba.com>

Using static_call to switch between:
  native_queued_spin_lock_slowpath()    __pv_queued_spin_lock_slowpath()
  native_queued_spin_unlock()           __pv_queued_spin_unlock()

Finish the pv_wait implementation, but pv_kick needs the SBI
definition of the next patches.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
 arch/riscv/include/asm/Kbuild               |  1 -
 arch/riscv/include/asm/qspinlock.h          | 35 +++++++++++++
 arch/riscv/include/asm/qspinlock_paravirt.h | 29 +++++++++++
 arch/riscv/include/asm/spinlock.h           |  2 +-
 arch/riscv/kernel/qspinlock_paravirt.c      | 57 +++++++++++++++++++++
 arch/riscv/kernel/setup.c                   |  4 ++
 6 files changed, 126 insertions(+), 2 deletions(-)
 create mode 100644 arch/riscv/include/asm/qspinlock.h
 create mode 100644 arch/riscv/include/asm/qspinlock_paravirt.h
 create mode 100644 arch/riscv/kernel/qspinlock_paravirt.c
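
The commit message defers pv_kick to later patches that add the needed SBI
definition. As a rough, hypothetical sketch of what such a kick could reduce
to once a paravirt-lock SBI extension exists (SBI_EXT_PVLOCK and
SBI_EXT_PVLOCK_KICK_CPU below are placeholder names, not defined by this
patch):

#include <asm/sbi.h>
#include <asm/smp.h>

/* Hypothetical: wake the vCPU parked in pv_wait() with an SBI call.
 * The extension/function IDs are placeholders pending the later patches. */
void pv_kick(int cpu)
{
	sbi_ecall(SBI_EXT_PVLOCK, SBI_EXT_PVLOCK_KICK_CPU,
		  cpuid_to_hartid_map(cpu), 0, 0, 0, 0, 0);
}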

Comments

Leonardo Bras Sept. 15, 2023, 5:42 a.m. UTC | #1
On Sun, Sep 10, 2023 at 04:29:05AM -0400, guoren@kernel.org wrote:
> From: Guo Ren <guoren@linux.alibaba.com>
> 
> Using static_call to switch between:
>   native_queued_spin_lock_slowpath()    __pv_queued_spin_lock_slowpath()
>   native_queued_spin_unlock()           __pv_queued_spin_unlock()
> 
> Finish the pv_wait implementation, but pv_kick needs the SBI
> definition of the next patches.
> 
> Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
> Signed-off-by: Guo Ren <guoren@kernel.org>
> ---
>  arch/riscv/include/asm/Kbuild               |  1 -
>  arch/riscv/include/asm/qspinlock.h          | 35 +++++++++++++
>  arch/riscv/include/asm/qspinlock_paravirt.h | 29 +++++++++++
>  arch/riscv/include/asm/spinlock.h           |  2 +-
>  arch/riscv/kernel/qspinlock_paravirt.c      | 57 +++++++++++++++++++++
>  arch/riscv/kernel/setup.c                   |  4 ++
>  6 files changed, 126 insertions(+), 2 deletions(-)
>  create mode 100644 arch/riscv/include/asm/qspinlock.h
>  create mode 100644 arch/riscv/include/asm/qspinlock_paravirt.h
>  create mode 100644 arch/riscv/kernel/qspinlock_paravirt.c
> 
> diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
> index a0dc85e4a754..b89cb3b73c13 100644
> --- a/arch/riscv/include/asm/Kbuild
> +++ b/arch/riscv/include/asm/Kbuild
> @@ -7,6 +7,5 @@ generic-y += parport.h
>  generic-y += spinlock_types.h
>  generic-y += qrwlock.h
>  generic-y += qrwlock_types.h
> -generic-y += qspinlock.h
>  generic-y += user.h
>  generic-y += vmlinux.lds.h
> diff --git a/arch/riscv/include/asm/qspinlock.h b/arch/riscv/include/asm/qspinlock.h
> new file mode 100644
> index 000000000000..7d4f416c908c
> --- /dev/null
> +++ b/arch/riscv/include/asm/qspinlock.h
> @@ -0,0 +1,35 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (c), 2023 Alibaba Cloud
> + * Authors:
> + *	Guo Ren <guoren@linux.alibaba.com>
> + */
> +
> +#ifndef _ASM_RISCV_QSPINLOCK_H
> +#define _ASM_RISCV_QSPINLOCK_H
> +
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +#include <asm/qspinlock_paravirt.h>
> +
> +/* How long a lock should spin before we consider blocking */
> +#define SPIN_THRESHOLD		(1 << 15)
> +
> +void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +void __pv_init_lock_hash(void);
> +void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +
> +static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> +{
> +	static_call(pv_queued_spin_lock_slowpath)(lock, val);
> +}
> +
> +#define queued_spin_unlock	queued_spin_unlock
> +static inline void queued_spin_unlock(struct qspinlock *lock)
> +{
> +	static_call(pv_queued_spin_unlock)(lock);
> +}
> +#endif /* CONFIG_PARAVIRT_SPINLOCKS */
> +
> +#include <asm-generic/qspinlock.h>
> +
> +#endif /* _ASM_RISCV_QSPINLOCK_H */
> diff --git a/arch/riscv/include/asm/qspinlock_paravirt.h b/arch/riscv/include/asm/qspinlock_paravirt.h
> new file mode 100644
> index 000000000000..9681e851f69d
> --- /dev/null
> +++ b/arch/riscv/include/asm/qspinlock_paravirt.h
> @@ -0,0 +1,29 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (c), 2023 Alibaba Cloud
> + * Authors:
> + *	Guo Ren <guoren@linux.alibaba.com>
> + */
> +
> +#ifndef _ASM_RISCV_QSPINLOCK_PARAVIRT_H
> +#define _ASM_RISCV_QSPINLOCK_PARAVIRT_H
> +
> +void pv_wait(u8 *ptr, u8 val);
> +void pv_kick(int cpu);
> +
> +void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +void dummy_queued_spin_unlock(struct qspinlock *lock);
> +
> +DECLARE_STATIC_CALL(pv_queued_spin_lock_slowpath, dummy_queued_spin_lock_slowpath);
> +DECLARE_STATIC_CALL(pv_queued_spin_unlock, dummy_queued_spin_unlock);
> +
> +void __init pv_qspinlock_init(void);
> +
> +static inline bool pv_is_native_spin_unlock(void)
> +{
> +	return false;
> +}
> +
> +void __pv_queued_spin_unlock(struct qspinlock *lock);
> +
> +#endif /* _ASM_RISCV_QSPINLOCK_PARAVIRT_H */
> diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
> index 6b38d6616f14..ed4253f491fe 100644
> --- a/arch/riscv/include/asm/spinlock.h
> +++ b/arch/riscv/include/asm/spinlock.h
> @@ -39,7 +39,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
>  #undef arch_spin_trylock
>  #undef arch_spin_unlock
>  
> -#include <asm-generic/qspinlock.h>
> +#include <asm/qspinlock.h>
>  #include <linux/jump_label.h>
>  
>  #undef arch_spin_is_locked
> diff --git a/arch/riscv/kernel/qspinlock_paravirt.c b/arch/riscv/kernel/qspinlock_paravirt.c
> new file mode 100644
> index 000000000000..85ff5a3ec234
> --- /dev/null
> +++ b/arch/riscv/kernel/qspinlock_paravirt.c
> @@ -0,0 +1,57 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (c), 2023 Alibaba Cloud
> + * Authors:
> + *	Guo Ren <guoren@linux.alibaba.com>
> + */
> +
> +#include <linux/static_call.h>
> +#include <asm/qspinlock_paravirt.h>
> +#include <asm/sbi.h>
> +
> +void pv_kick(int cpu)
> +{
> +	return;
> +}
> +
> +void pv_wait(u8 *ptr, u8 val)
> +{
> +	unsigned long flags;
> +
> +	if (in_nmi())
> +		return;
> +
> +	local_irq_save(flags);
> +	if (READ_ONCE(*ptr) != val)
> +		goto out;
> +
> +	/* wait_for_interrupt(); */
> +out:
> +	local_irq_restore(flags);
> +}
> +
> +static void native_queued_spin_unlock(struct qspinlock *lock)
> +{
> +	smp_store_release(&lock->locked, 0);
> +}
> +
> +DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
> +EXPORT_STATIC_CALL(pv_queued_spin_lock_slowpath);
> +
> +DEFINE_STATIC_CALL(pv_queued_spin_unlock, native_queued_spin_unlock);
> +EXPORT_STATIC_CALL(pv_queued_spin_unlock);
> +
> +void __init pv_qspinlock_init(void)
> +{
> +	if (num_possible_cpus() == 1)
> +		return;
> +
> +	if(sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)

Checks like this seem to be very common in this patchset.
For someone not very familiar with this, it can be hard to
understand.

I mean, on patch 8/17 you introduce those IDs, which look to be 
incremental ( ID == N includes stuff from ID < N ), but I am not sure as I 
couldn't find much documentation on that.

Then above you test for the id being different than 
SBI_EXT_BASE_IMPL_ID_KVM, but if they are actually incremental and a new 
version lands, the new version will also return early because it passes the 
test.

I am not sure if the above is right, but it's all I could understand without
documentation.

Well, my point is: this seems hard to understand & review, so it would be 
nice to have a macro like this to be used instead:

#define sbi_fw_implements_kvm() \
	(sbi_get_firmware_id() >= SBI_EXT_BASE_IMPL_ID_KVM)

if(!sbi_fw_implements_kvm())
	return;

What do you think?

Other than that, LGTM.

Thanks!
Leo

> +		return;
> +
> +	pr_info("PV qspinlocks enabled\n");
> +	__pv_init_lock_hash();
> +
> +	static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
> +	static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
> +}
> diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> index c57d15b05160..88690751f2ee 100644
> --- a/arch/riscv/kernel/setup.c
> +++ b/arch/riscv/kernel/setup.c
> @@ -321,6 +321,10 @@ static void __init riscv_spinlock_init(void)
>  #ifdef CONFIG_QUEUED_SPINLOCKS
>  	virt_spin_lock_init();
>  #endif
> +
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +	pv_qspinlock_init();
> +#endif
>  }
>  
>  extern void __init init_rt_signal_env(void);
> -- 
> 2.36.1
>
Guo Ren Sept. 17, 2023, 2:58 p.m. UTC | #2
On Fri, Sep 15, 2023 at 1:42 PM Leonardo Bras <leobras@redhat.com> wrote:
>
> On Sun, Sep 10, 2023 at 04:29:05AM -0400, guoren@kernel.org wrote:
> > From: Guo Ren <guoren@linux.alibaba.com>
> >
> > Using static_call to switch between:
> >   native_queued_spin_lock_slowpath()    __pv_queued_spin_lock_slowpath()
> >   native_queued_spin_unlock()           __pv_queued_spin_unlock()
> >
> > Finish the pv_wait implementation, but pv_kick needs the SBI
> > definition of the next patches.
> >
> > Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
> > Signed-off-by: Guo Ren <guoren@kernel.org>
> > ---
> >  arch/riscv/include/asm/Kbuild               |  1 -
> >  arch/riscv/include/asm/qspinlock.h          | 35 +++++++++++++
> >  arch/riscv/include/asm/qspinlock_paravirt.h | 29 +++++++++++
> >  arch/riscv/include/asm/spinlock.h           |  2 +-
> >  arch/riscv/kernel/qspinlock_paravirt.c      | 57 +++++++++++++++++++++
> >  arch/riscv/kernel/setup.c                   |  4 ++
> >  6 files changed, 126 insertions(+), 2 deletions(-)
> >  create mode 100644 arch/riscv/include/asm/qspinlock.h
> >  create mode 100644 arch/riscv/include/asm/qspinlock_paravirt.h
> >  create mode 100644 arch/riscv/kernel/qspinlock_paravirt.c
> >
> > diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
> > index a0dc85e4a754..b89cb3b73c13 100644
> > --- a/arch/riscv/include/asm/Kbuild
> > +++ b/arch/riscv/include/asm/Kbuild
> > @@ -7,6 +7,5 @@ generic-y += parport.h
> >  generic-y += spinlock_types.h
> >  generic-y += qrwlock.h
> >  generic-y += qrwlock_types.h
> > -generic-y += qspinlock.h
> >  generic-y += user.h
> >  generic-y += vmlinux.lds.h
> > diff --git a/arch/riscv/include/asm/qspinlock.h b/arch/riscv/include/asm/qspinlock.h
> > new file mode 100644
> > index 000000000000..7d4f416c908c
> > --- /dev/null
> > +++ b/arch/riscv/include/asm/qspinlock.h
> > @@ -0,0 +1,35 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +/*
> > + * Copyright (c), 2023 Alibaba Cloud
> > + * Authors:
> > + *   Guo Ren <guoren@linux.alibaba.com>
> > + */
> > +
> > +#ifndef _ASM_RISCV_QSPINLOCK_H
> > +#define _ASM_RISCV_QSPINLOCK_H
> > +
> > +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> > +#include <asm/qspinlock_paravirt.h>
> > +
> > +/* How long a lock should spin before we consider blocking */
> > +#define SPIN_THRESHOLD               (1 << 15)
> > +
> > +void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> > +void __pv_init_lock_hash(void);
> > +void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> > +
> > +static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> > +{
> > +     static_call(pv_queued_spin_lock_slowpath)(lock, val);
> > +}
> > +
> > +#define queued_spin_unlock   queued_spin_unlock
> > +static inline void queued_spin_unlock(struct qspinlock *lock)
> > +{
> > +     static_call(pv_queued_spin_unlock)(lock);
> > +}
> > +#endif /* CONFIG_PARAVIRT_SPINLOCKS */
> > +
> > +#include <asm-generic/qspinlock.h>
> > +
> > +#endif /* _ASM_RISCV_QSPINLOCK_H */
> > diff --git a/arch/riscv/include/asm/qspinlock_paravirt.h b/arch/riscv/include/asm/qspinlock_paravirt.h
> > new file mode 100644
> > index 000000000000..9681e851f69d
> > --- /dev/null
> > +++ b/arch/riscv/include/asm/qspinlock_paravirt.h
> > @@ -0,0 +1,29 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +/*
> > + * Copyright (c), 2023 Alibaba Cloud
> > + * Authors:
> > + *   Guo Ren <guoren@linux.alibaba.com>
> > + */
> > +
> > +#ifndef _ASM_RISCV_QSPINLOCK_PARAVIRT_H
> > +#define _ASM_RISCV_QSPINLOCK_PARAVIRT_H
> > +
> > +void pv_wait(u8 *ptr, u8 val);
> > +void pv_kick(int cpu);
> > +
> > +void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> > +void dummy_queued_spin_unlock(struct qspinlock *lock);
> > +
> > +DECLARE_STATIC_CALL(pv_queued_spin_lock_slowpath, dummy_queued_spin_lock_slowpath);
> > +DECLARE_STATIC_CALL(pv_queued_spin_unlock, dummy_queued_spin_unlock);
> > +
> > +void __init pv_qspinlock_init(void);
> > +
> > +static inline bool pv_is_native_spin_unlock(void)
> > +{
> > +     return false;
> > +}
> > +
> > +void __pv_queued_spin_unlock(struct qspinlock *lock);
> > +
> > +#endif /* _ASM_RISCV_QSPINLOCK_PARAVIRT_H */
> > diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
> > index 6b38d6616f14..ed4253f491fe 100644
> > --- a/arch/riscv/include/asm/spinlock.h
> > +++ b/arch/riscv/include/asm/spinlock.h
> > @@ -39,7 +39,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
> >  #undef arch_spin_trylock
> >  #undef arch_spin_unlock
> >
> > -#include <asm-generic/qspinlock.h>
> > +#include <asm/qspinlock.h>
> >  #include <linux/jump_label.h>
> >
> >  #undef arch_spin_is_locked
> > diff --git a/arch/riscv/kernel/qspinlock_paravirt.c b/arch/riscv/kernel/qspinlock_paravirt.c
> > new file mode 100644
> > index 000000000000..85ff5a3ec234
> > --- /dev/null
> > +++ b/arch/riscv/kernel/qspinlock_paravirt.c
> > @@ -0,0 +1,57 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/*
> > + * Copyright (c), 2023 Alibaba Cloud
> > + * Authors:
> > + *   Guo Ren <guoren@linux.alibaba.com>
> > + */
> > +
> > +#include <linux/static_call.h>
> > +#include <asm/qspinlock_paravirt.h>
> > +#include <asm/sbi.h>
> > +
> > +void pv_kick(int cpu)
> > +{
> > +     return;
> > +}
> > +
> > +void pv_wait(u8 *ptr, u8 val)
> > +{
> > +     unsigned long flags;
> > +
> > +     if (in_nmi())
> > +             return;
> > +
> > +     local_irq_save(flags);
> > +     if (READ_ONCE(*ptr) != val)
> > +             goto out;
> > +
> > +     /* wait_for_interrupt(); */
> > +out:
> > +     local_irq_restore(flags);
> > +}
> > +
> > +static void native_queued_spin_unlock(struct qspinlock *lock)
> > +{
> > +     smp_store_release(&lock->locked, 0);
> > +}
> > +
> > +DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
> > +EXPORT_STATIC_CALL(pv_queued_spin_lock_slowpath);
> > +
> > +DEFINE_STATIC_CALL(pv_queued_spin_unlock, native_queued_spin_unlock);
> > +EXPORT_STATIC_CALL(pv_queued_spin_unlock);
> > +
> > +void __init pv_qspinlock_init(void)
> > +{
> > +     if (num_possible_cpus() == 1)
> > +             return;
> > +
> > +     if(sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)
>
> Checks like this seem to be very common in this patchset.
> For someone not very familiar with this, it can be hard to
> understand.
>
> I mean, on patch 8/17 you introduce those IDs, which look to be
> incremental ( ID == N includes stuff from ID < N ), but I am not sure as I
> couldn't find much documentation on that.
It's from the SBI spec:
https://github.com/riscv-non-isa/riscv-sbi-doc/releases

0 Berkeley Boot Loader (BBL)
1 OpenSBI
2 Xvisor
3 KVM
4 RustSBI
5 Diosix
6 Coffer
7 Xen Project
8 PolarFire Hart Software Service
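
A minimal sketch of how that table could be written out as constants (the
names below are assumptions modelled on the spec table; patch 8/17 of this
series is where the definitions actually used are introduced):

/* SBI implementation IDs, following the table above (illustrative names). */
enum sbi_ext_base_impl_id {
	SBI_EXT_BASE_IMPL_ID_BBL = 0,		/* Berkeley Boot Loader */
	SBI_EXT_BASE_IMPL_ID_OPENSBI,		/* OpenSBI */
	SBI_EXT_BASE_IMPL_ID_XVISOR,		/* Xvisor */
	SBI_EXT_BASE_IMPL_ID_KVM,		/* KVM */
	SBI_EXT_BASE_IMPL_ID_RUSTSBI,		/* RustSBI */
	SBI_EXT_BASE_IMPL_ID_DIOSIX,		/* Diosix */
	SBI_EXT_BASE_IMPL_ID_COFFER,		/* Coffer */
	SBI_EXT_BASE_IMPL_ID_XEN,		/* Xen Project */
	SBI_EXT_BASE_IMPL_ID_POLARFIRE_HSS,	/* PolarFire Hart Software Service */
};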

>
> Then above you test for the id being different than
> SBI_EXT_BASE_IMPL_ID_KVM, but if they are actually incremental and a new
> version lands, the new version will also return early because it passes the
> test.
>
> I am not sure if the above is right, but it's all I could understand without
> documentation.
>
> Well, my point is: this seems hard to understand & review, so it would be
> nice to have a macro like this to be used instead:
>
> #define sbi_fw_implements_kvm() \
>         (sbi_get_firmware_id() >= SBI_EXT_BASE_IMPL_ID_KVM)
No, it's not correct. It must be:
(sbi_get_firmware_id() == SBI_EXT_BASE_IMPL_ID_KVM)

>
> if(!sbi_fw_implements_kvm())
I'm okay with sbi_fw_implements_kvm().

>         return;
>
> What do you think?
>
> Other than that, LGTM.
>
> Thanks!
> Leo
>
> > +             return;
> > +
> > +     pr_info("PV qspinlocks enabled\n");
> > +     __pv_init_lock_hash();
> > +
> > +     static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
> > +     static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
> > +}
> > diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> > index c57d15b05160..88690751f2ee 100644
> > --- a/arch/riscv/kernel/setup.c
> > +++ b/arch/riscv/kernel/setup.c
> > @@ -321,6 +321,10 @@ static void __init riscv_spinlock_init(void)
> >  #ifdef CONFIG_QUEUED_SPINLOCKS
> >       virt_spin_lock_init();
> >  #endif
> > +
> > +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> > +     pv_qspinlock_init();
> > +#endif
> >  }
> >
> >  extern void __init init_rt_signal_env(void);
> > --
> > 2.36.1
> >
>
Leonardo Bras Sept. 19, 2023, 5:43 a.m. UTC | #3
On Sun, Sep 17, 2023 at 10:58:18PM +0800, Guo Ren wrote:
> On Fri, Sep 15, 2023 at 1:42 PM Leonardo Bras <leobras@redhat.com> wrote:
> >
> > On Sun, Sep 10, 2023 at 04:29:05AM -0400, guoren@kernel.org wrote:
> > > From: Guo Ren <guoren@linux.alibaba.com>
> > >
> > > Using static_call to switch between:
> > >   native_queued_spin_lock_slowpath()    __pv_queued_spin_lock_slowpath()
> > >   native_queued_spin_unlock()           __pv_queued_spin_unlock()
> > >
> > > Finish the pv_wait implementation, but pv_kick needs the SBI
> > > definition of the next patches.
> > >
> > > Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
> > > Signed-off-by: Guo Ren <guoren@kernel.org>
> > > ---
> > >  arch/riscv/include/asm/Kbuild               |  1 -
> > >  arch/riscv/include/asm/qspinlock.h          | 35 +++++++++++++
> > >  arch/riscv/include/asm/qspinlock_paravirt.h | 29 +++++++++++
> > >  arch/riscv/include/asm/spinlock.h           |  2 +-
> > >  arch/riscv/kernel/qspinlock_paravirt.c      | 57 +++++++++++++++++++++
> > >  arch/riscv/kernel/setup.c                   |  4 ++
> > >  6 files changed, 126 insertions(+), 2 deletions(-)
> > >  create mode 100644 arch/riscv/include/asm/qspinlock.h
> > >  create mode 100644 arch/riscv/include/asm/qspinlock_paravirt.h
> > >  create mode 100644 arch/riscv/kernel/qspinlock_paravirt.c
> > >
> > > diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
> > > index a0dc85e4a754..b89cb3b73c13 100644
> > > --- a/arch/riscv/include/asm/Kbuild
> > > +++ b/arch/riscv/include/asm/Kbuild
> > > @@ -7,6 +7,5 @@ generic-y += parport.h
> > >  generic-y += spinlock_types.h
> > >  generic-y += qrwlock.h
> > >  generic-y += qrwlock_types.h
> > > -generic-y += qspinlock.h
> > >  generic-y += user.h
> > >  generic-y += vmlinux.lds.h
> > > diff --git a/arch/riscv/include/asm/qspinlock.h b/arch/riscv/include/asm/qspinlock.h
> > > new file mode 100644
> > > index 000000000000..7d4f416c908c
> > > --- /dev/null
> > > +++ b/arch/riscv/include/asm/qspinlock.h
> > > @@ -0,0 +1,35 @@
> > > +/* SPDX-License-Identifier: GPL-2.0 */
> > > +/*
> > > + * Copyright (c), 2023 Alibaba Cloud
> > > + * Authors:
> > > + *   Guo Ren <guoren@linux.alibaba.com>
> > > + */
> > > +
> > > +#ifndef _ASM_RISCV_QSPINLOCK_H
> > > +#define _ASM_RISCV_QSPINLOCK_H
> > > +
> > > +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> > > +#include <asm/qspinlock_paravirt.h>
> > > +
> > > +/* How long a lock should spin before we consider blocking */
> > > +#define SPIN_THRESHOLD               (1 << 15)
> > > +
> > > +void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> > > +void __pv_init_lock_hash(void);
> > > +void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> > > +
> > > +static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> > > +{
> > > +     static_call(pv_queued_spin_lock_slowpath)(lock, val);
> > > +}
> > > +
> > > +#define queued_spin_unlock   queued_spin_unlock
> > > +static inline void queued_spin_unlock(struct qspinlock *lock)
> > > +{
> > > +     static_call(pv_queued_spin_unlock)(lock);
> > > +}
> > > +#endif /* CONFIG_PARAVIRT_SPINLOCKS */
> > > +
> > > +#include <asm-generic/qspinlock.h>
> > > +
> > > +#endif /* _ASM_RISCV_QSPINLOCK_H */
> > > diff --git a/arch/riscv/include/asm/qspinlock_paravirt.h b/arch/riscv/include/asm/qspinlock_paravirt.h
> > > new file mode 100644
> > > index 000000000000..9681e851f69d
> > > --- /dev/null
> > > +++ b/arch/riscv/include/asm/qspinlock_paravirt.h
> > > @@ -0,0 +1,29 @@
> > > +/* SPDX-License-Identifier: GPL-2.0 */
> > > +/*
> > > + * Copyright (c), 2023 Alibaba Cloud
> > > + * Authors:
> > > + *   Guo Ren <guoren@linux.alibaba.com>
> > > + */
> > > +
> > > +#ifndef _ASM_RISCV_QSPINLOCK_PARAVIRT_H
> > > +#define _ASM_RISCV_QSPINLOCK_PARAVIRT_H
> > > +
> > > +void pv_wait(u8 *ptr, u8 val);
> > > +void pv_kick(int cpu);
> > > +
> > > +void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> > > +void dummy_queued_spin_unlock(struct qspinlock *lock);
> > > +
> > > +DECLARE_STATIC_CALL(pv_queued_spin_lock_slowpath, dummy_queued_spin_lock_slowpath);
> > > +DECLARE_STATIC_CALL(pv_queued_spin_unlock, dummy_queued_spin_unlock);
> > > +
> > > +void __init pv_qspinlock_init(void);
> > > +
> > > +static inline bool pv_is_native_spin_unlock(void)
> > > +{
> > > +     return false;
> > > +}
> > > +
> > > +void __pv_queued_spin_unlock(struct qspinlock *lock);
> > > +
> > > +#endif /* _ASM_RISCV_QSPINLOCK_PARAVIRT_H */
> > > diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
> > > index 6b38d6616f14..ed4253f491fe 100644
> > > --- a/arch/riscv/include/asm/spinlock.h
> > > +++ b/arch/riscv/include/asm/spinlock.h
> > > @@ -39,7 +39,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
> > >  #undef arch_spin_trylock
> > >  #undef arch_spin_unlock
> > >
> > > -#include <asm-generic/qspinlock.h>
> > > +#include <asm/qspinlock.h>
> > >  #include <linux/jump_label.h>
> > >
> > >  #undef arch_spin_is_locked
> > > diff --git a/arch/riscv/kernel/qspinlock_paravirt.c b/arch/riscv/kernel/qspinlock_paravirt.c
> > > new file mode 100644
> > > index 000000000000..85ff5a3ec234
> > > --- /dev/null
> > > +++ b/arch/riscv/kernel/qspinlock_paravirt.c
> > > @@ -0,0 +1,57 @@
> > > +// SPDX-License-Identifier: GPL-2.0
> > > +/*
> > > + * Copyright (c), 2023 Alibaba Cloud
> > > + * Authors:
> > > + *   Guo Ren <guoren@linux.alibaba.com>
> > > + */
> > > +
> > > +#include <linux/static_call.h>
> > > +#include <asm/qspinlock_paravirt.h>
> > > +#include <asm/sbi.h>
> > > +
> > > +void pv_kick(int cpu)
> > > +{
> > > +     return;
> > > +}
> > > +
> > > +void pv_wait(u8 *ptr, u8 val)
> > > +{
> > > +     unsigned long flags;
> > > +
> > > +     if (in_nmi())
> > > +             return;
> > > +
> > > +     local_irq_save(flags);
> > > +     if (READ_ONCE(*ptr) != val)
> > > +             goto out;
> > > +
> > > +     /* wait_for_interrupt(); */
> > > +out:
> > > +     local_irq_restore(flags);
> > > +}
> > > +
> > > +static void native_queued_spin_unlock(struct qspinlock *lock)
> > > +{
> > > +     smp_store_release(&lock->locked, 0);
> > > +}
> > > +
> > > +DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
> > > +EXPORT_STATIC_CALL(pv_queued_spin_lock_slowpath);
> > > +
> > > +DEFINE_STATIC_CALL(pv_queued_spin_unlock, native_queued_spin_unlock);
> > > +EXPORT_STATIC_CALL(pv_queued_spin_unlock);
> > > +
> > > +void __init pv_qspinlock_init(void)
> > > +{
> > > +     if (num_possible_cpus() == 1)
> > > +             return;
> > > +
> > > +     if(sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)
> >
> > Checks like this seem to be very common in this patchset.
> > For someone not very familiar with this, it can be hard to
> > understand.
> >
> > I mean, on patch 8/17 you introduce those IDs, which look to be
> > incremental ( ID == N includes stuff from ID < N ), but I am not sure as I
> > couldn't find much documentation on that.
> It's from the SBI spec:
> https://github.com/riscv-non-isa/riscv-sbi-doc/releases
> 
> 0 Berkeley Boot Loader (BBL)
> 1 OpenSBI
> 2 Xvisor
> 3 KVM
> 4 RustSBI
> 5 Diosix
> 6 Coffer
> 7 Xen Project
> 8 PolarFire Hart Software Service

Oh, I see. Thanks for the reference!
Please also include the github link and/or the doc name in the commit
message for future reference :)

> 
> >
> > Then above you test for the id being different than
> > SBI_EXT_BASE_IMPL_ID_KVM, but if they are actually incremental and a new
> > version lands, the new version will also return early because it passes the
> > test.
> >
> > I am not sure if the above is right, but it's all I could understand without
> > documentation.
> >
> > Well, my point is: this seems hard to understand & review, so it would be
> > nice to have a macro like this to be used instead:
> >
> > #define sbi_fw_implements_kvm() \
> >         (sbi_get_firmware_id() >= SBI_EXT_BASE_IMPL_ID_KVM)
> No, it's not correct. It must be:
> (sbi_get_firmware_id() == SBI_EXT_BASE_IMPL_ID_KVM)

Looking at the doc you provided, I think I am able to understand it now.
The idea is to provide an ID for a given SBI implementation, so in those
tests you check whether the SBI implementation being used is KVM, meaning it's a
KVM guest. Ok, that makes sense now. Thanks!

> 
> >
> > if(!sbi_fw_implements_kvm())
> I'm okay with sbi_fw_implements_kvm().

Thanks! also, thanks again for sharing the doc!

With the above suggestions, please feel free to include in the next versions:

Reviewed-by: Leonardo Bras <leobras@redhat.com>

Thx
Leo
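
Pulling the thread together, a sketch of how the agreed-upon helper and its
caller in pv_qspinlock_init() could end up looking (the helper name comes
from the discussion above and is not part of the posted patch; note the ==
comparison rather than >=):

/* Helper name taken from the review discussion; not in the posted patch. */
#define sbi_fw_implements_kvm() \
	(sbi_get_firmware_id() == SBI_EXT_BASE_IMPL_ID_KVM)

void __init pv_qspinlock_init(void)
{
	if (num_possible_cpus() == 1)
		return;

	/* Only a KVM guest gets the paravirt slow paths. */
	if (!sbi_fw_implements_kvm())
		return;

	pr_info("PV qspinlocks enabled\n");
	__pv_init_lock_hash();

	static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
	static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
}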

> 
> >         return;
> >
> > What do you think?
> >
> > Other than that, LGTM.
> >
> > Thanks!
> > Leo
> >
> > > +             return;
> > > +
> > > +     pr_info("PV qspinlocks enabled\n");
> > > +     __pv_init_lock_hash();
> > > +
> > > +     static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
> > > +     static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
> > > +}
> > > diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> > > index c57d15b05160..88690751f2ee 100644
> > > --- a/arch/riscv/kernel/setup.c
> > > +++ b/arch/riscv/kernel/setup.c
> > > @@ -321,6 +321,10 @@ static void __init riscv_spinlock_init(void)
> > >  #ifdef CONFIG_QUEUED_SPINLOCKS
> > >       virt_spin_lock_init();
> > >  #endif
> > > +
> > > +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> > > +     pv_qspinlock_init();
> > > +#endif
> > >  }
> > >
> > >  extern void __init init_rt_signal_env(void);
> > > --
> > > 2.36.1
> > >
> >
> 
> 
> -- 
> Best Regards
>  Guo Ren
>

Patch

diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index a0dc85e4a754..b89cb3b73c13 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -7,6 +7,5 @@  generic-y += parport.h
 generic-y += spinlock_types.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
-generic-y += qspinlock.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/qspinlock.h b/arch/riscv/include/asm/qspinlock.h
new file mode 100644
index 000000000000..7d4f416c908c
--- /dev/null
+++ b/arch/riscv/include/asm/qspinlock.h
@@ -0,0 +1,35 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c), 2023 Alibaba Cloud
+ * Authors:
+ *	Guo Ren <guoren@linux.alibaba.com>
+ */
+
+#ifndef _ASM_RISCV_QSPINLOCK_H
+#define _ASM_RISCV_QSPINLOCK_H
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/qspinlock_paravirt.h>
+
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD		(1 << 15)
+
+void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void __pv_init_lock_hash(void);
+void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	static_call(pv_queued_spin_lock_slowpath)(lock, val);
+}
+
+#define queued_spin_unlock	queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	static_call(pv_queued_spin_unlock)(lock);
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_RISCV_QSPINLOCK_H */
diff --git a/arch/riscv/include/asm/qspinlock_paravirt.h b/arch/riscv/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..9681e851f69d
--- /dev/null
+++ b/arch/riscv/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,29 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c), 2023 Alibaba Cloud
+ * Authors:
+ *	Guo Ren <guoren@linux.alibaba.com>
+ */
+
+#ifndef _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+#define _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+
+void pv_wait(u8 *ptr, u8 val);
+void pv_kick(int cpu);
+
+void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void dummy_queued_spin_unlock(struct qspinlock *lock);
+
+DECLARE_STATIC_CALL(pv_queued_spin_lock_slowpath, dummy_queued_spin_lock_slowpath);
+DECLARE_STATIC_CALL(pv_queued_spin_unlock, dummy_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void);
+
+static inline bool pv_is_native_spin_unlock(void)
+{
+	return false;
+}
+
+void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+#endif /* _ASM_RISCV_QSPINLOCK_PARAVIRT_H */
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
index 6b38d6616f14..ed4253f491fe 100644
--- a/arch/riscv/include/asm/spinlock.h
+++ b/arch/riscv/include/asm/spinlock.h
@@ -39,7 +39,7 @@  static inline bool virt_spin_lock(struct qspinlock *lock)
 #undef arch_spin_trylock
 #undef arch_spin_unlock
 
-#include <asm-generic/qspinlock.h>
+#include <asm/qspinlock.h>
 #include <linux/jump_label.h>
 
 #undef arch_spin_is_locked
diff --git a/arch/riscv/kernel/qspinlock_paravirt.c b/arch/riscv/kernel/qspinlock_paravirt.c
new file mode 100644
index 000000000000..85ff5a3ec234
--- /dev/null
+++ b/arch/riscv/kernel/qspinlock_paravirt.c
@@ -0,0 +1,57 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c), 2023 Alibaba Cloud
+ * Authors:
+ *	Guo Ren <guoren@linux.alibaba.com>
+ */
+
+#include <linux/static_call.h>
+#include <asm/qspinlock_paravirt.h>
+#include <asm/sbi.h>
+
+void pv_kick(int cpu)
+{
+	return;
+}
+
+void pv_wait(u8 *ptr, u8 val)
+{
+	unsigned long flags;
+
+	if (in_nmi())
+		return;
+
+	local_irq_save(flags);
+	if (READ_ONCE(*ptr) != val)
+		goto out;
+
+	/* wait_for_interrupt(); */
+out:
+	local_irq_restore(flags);
+}
+
+static void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	smp_store_release(&lock->locked, 0);
+}
+
+DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
+EXPORT_STATIC_CALL(pv_queued_spin_lock_slowpath);
+
+DEFINE_STATIC_CALL(pv_queued_spin_unlock, native_queued_spin_unlock);
+EXPORT_STATIC_CALL(pv_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void)
+{
+	if (num_possible_cpus() == 1)
+		return;
+
+	if(sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)
+		return;
+
+	pr_info("PV qspinlocks enabled\n");
+	__pv_init_lock_hash();
+
+	static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
+	static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
+}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index c57d15b05160..88690751f2ee 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -321,6 +321,10 @@  static void __init riscv_spinlock_init(void)
 #ifdef CONFIG_QUEUED_SPINLOCKS
 	virt_spin_lock_init();
 #endif
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+	pv_qspinlock_init();
+#endif
 }
 
 extern void __init init_rt_signal_env(void);