[v3,11/11] riscv: Add qspinlock support

Message ID 20240717061957.140712-12-alexghiti@rivosinc.com (mailing list archive)
State Superseded
Series Zacas/Zabha support and qspinlocks

Checks

Context                   | Check   | Description
conchuod/vmtest-fixes-PR  | fail    | PR summary
conchuod/patch-11-test-1  | success | .github/scripts/patches/tests/build_rv32_defconfig.sh
conchuod/patch-11-test-2  | success | .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh
conchuod/patch-11-test-3  | success | .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh
conchuod/patch-11-test-4  | success | .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-11-test-5  | success | .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-11-test-6  | fail    | .github/scripts/patches/tests/checkpatch.sh
conchuod/patch-11-test-7  | success | .github/scripts/patches/tests/dtb_warn_rv64.sh
conchuod/patch-11-test-8  | success | .github/scripts/patches/tests/header_inline.sh
conchuod/patch-11-test-9  | success | .github/scripts/patches/tests/kdoc.sh
conchuod/patch-11-test-10 | success | .github/scripts/patches/tests/module_param.sh
conchuod/patch-11-test-11 | success | .github/scripts/patches/tests/verify_fixes.sh
conchuod/patch-11-test-12 | success | .github/scripts/patches/tests/verify_signedoff.sh

Commit Message

Alexandre Ghiti July 17, 2024, 6:19 a.m. UTC
In order to produce a generic kernel, a user can select
CONFIG_RISCV_COMBO_SPINLOCKS, which will fall back at runtime to the
ticket spinlock implementation if neither Zabha nor Ziccrse is present.

Note that we can't use alternatives here: the discovery of extensions
is done too late, and we need to start with the qspinlock
implementation because the ticket spinlock implementation would
pollute the spinlock value, so let's use static keys instead.

This is largely based on Guo's work and Leonardo's reviews at [1].

Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@kernel.org/ [1]
Signed-off-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
---
 .../locking/queued-spinlocks/arch-support.txt |  2 +-
 arch/riscv/Kconfig                            | 29 ++++++++++++++
 arch/riscv/include/asm/Kbuild                 |  4 +-
 arch/riscv/include/asm/spinlock.h             | 39 +++++++++++++++++++
 arch/riscv/kernel/setup.c                     | 33 ++++++++++++++++
 include/asm-generic/qspinlock.h               |  2 +
 include/asm-generic/ticket_spinlock.h         |  2 +
 7 files changed, 109 insertions(+), 2 deletions(-)
 create mode 100644 arch/riscv/include/asm/spinlock.h

Comments

Guo Ren July 17, 2024, 9:30 a.m. UTC | #1
On Wed, Jul 17, 2024 at 2:31 PM Alexandre Ghiti <alexghiti@rivosinc.com> wrote:
>
> In order to produce a generic kernel, a user can select
> CONFIG_RISCV_COMBO_SPINLOCKS, which will fall back at runtime to the
> ticket spinlock implementation if neither Zabha nor Ziccrse is present.
>
> Note that we can't use alternatives here: the discovery of extensions
> is done too late, and we need to start with the qspinlock
> implementation because the ticket spinlock implementation would
> pollute the spinlock value, so let's use static keys instead.
>
> This is largely based on Guo's work and Leonardo's reviews at [1].
>
> Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@kernel.org/ [1]
> Signed-off-by: Guo Ren <guoren@kernel.org>
> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
> ---
>  .../locking/queued-spinlocks/arch-support.txt |  2 +-
>  arch/riscv/Kconfig                            | 29 ++++++++++++++
>  arch/riscv/include/asm/Kbuild                 |  4 +-
>  arch/riscv/include/asm/spinlock.h             | 39 +++++++++++++++++++
>  arch/riscv/kernel/setup.c                     | 33 ++++++++++++++++
>  include/asm-generic/qspinlock.h               |  2 +
>  include/asm-generic/ticket_spinlock.h         |  2 +
>  7 files changed, 109 insertions(+), 2 deletions(-)
>  create mode 100644 arch/riscv/include/asm/spinlock.h
>
> diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
> index 22f2990392ff..cf26042480e2 100644
> --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
> +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
> @@ -20,7 +20,7 @@
>      |    openrisc: |  ok  |
>      |      parisc: | TODO |
>      |     powerpc: |  ok  |
> -    |       riscv: | TODO |
> +    |       riscv: |  ok  |
>      |        s390: | TODO |
>      |          sh: | TODO |
>      |       sparc: |  ok  |
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index 0bbaec0444d0..5040c7eac70d 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -72,6 +72,7 @@ config RISCV
>         select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
>         select ARCH_WANTS_NO_INSTR
>         select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
> +       select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
>         select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
>         select BUILDTIME_TABLE_SORT if MMU
>         select CLINT_TIMER if RISCV_M_MODE
> @@ -482,6 +483,34 @@ config NODES_SHIFT
>           Specify the maximum number of NUMA Nodes available on the target
>           system.  Increases memory reserved to accommodate various tables.
>
> +choice
> +       prompt "RISC-V spinlock type"
> +       default RISCV_COMBO_SPINLOCKS
> +
> +config RISCV_TICKET_SPINLOCKS
> +       bool "Using ticket spinlock"
> +
> +config RISCV_QUEUED_SPINLOCKS
> +       bool "Using queued spinlock"
> +       depends on SMP && MMU
> +       select ARCH_USE_QUEUED_SPINLOCKS
> +       help
> +         The queued spinlock implementation requires the forward progress
> +         guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
> +         LR/SC with Ziccrse provide such guarantee.
> +
> +         Select this if and only if Zabha or Ziccrse is available on your
> +         platform.
> +
> +config RISCV_COMBO_SPINLOCKS
> +       bool "Using combo spinlock"
> +       depends on SMP && MMU
> +       select ARCH_USE_QUEUED_SPINLOCKS
> +       help
> +         Embed both queued spinlock and ticket lock so that the spinlock
> +         implementation can be chosen at runtime.
> +endchoice
> +
>  config RISCV_ALTERNATIVE
>         bool
>         depends on !XIP_KERNEL
> diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
> index 504f8b7e72d4..ad72f2bd4cc9 100644
> --- a/arch/riscv/include/asm/Kbuild
> +++ b/arch/riscv/include/asm/Kbuild
> @@ -2,10 +2,12 @@
>  generic-y += early_ioremap.h
>  generic-y += flat.h
>  generic-y += kvm_para.h
> +generic-y += mcs_spinlock.h
>  generic-y += parport.h
> -generic-y += spinlock.h
>  generic-y += spinlock_types.h
> +generic-y += ticket_spinlock.h
>  generic-y += qrwlock.h
>  generic-y += qrwlock_types.h
> +generic-y += qspinlock.h
>  generic-y += user.h
>  generic-y += vmlinux.lds.h
> diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
> new file mode 100644
> index 000000000000..4856d50006f2
> --- /dev/null
> +++ b/arch/riscv/include/asm/spinlock.h
> @@ -0,0 +1,39 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef __ASM_RISCV_SPINLOCK_H
> +#define __ASM_RISCV_SPINLOCK_H
> +
> +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
> +#define _Q_PENDING_LOOPS       (1 << 9)
> +
> +#define __no_arch_spinlock_redefine
> +#include <asm/ticket_spinlock.h>
> +#include <asm/qspinlock.h>
> +#include <asm/alternative.h>
> +
> +DECLARE_STATIC_KEY_TRUE(qspinlock_key);
> +
> +#define SPINLOCK_BASE_DECLARE(op, type, type_lock)                     \
> +static __always_inline type arch_spin_##op(type_lock lock)             \
> +{                                                                      \
> +       if (static_branch_unlikely(&qspinlock_key))                     \
> +               return queued_spin_##op(lock);                          \
> +       return ticket_spin_##op(lock);                                  \
> +}
> +
> +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
> +
> +#else
> +
> +#include <asm/ticket_spinlock.h>
> +
> +#endif
> +
> +#include <asm/qrwlock.h>
> +
> +#endif /* __ASM_RISCV_SPINLOCK_H */
> diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> index 4f73c0ae44b2..d7c31c9b8ead 100644
> --- a/arch/riscv/kernel/setup.c
> +++ b/arch/riscv/kernel/setup.c
> @@ -244,6 +244,38 @@ static void __init parse_dtb(void)
>  #endif
>  }
>
> +DEFINE_STATIC_KEY_TRUE(qspinlock_key);
> +EXPORT_SYMBOL(qspinlock_key);
> +
> +static void __init riscv_spinlock_init(void)
> +{
> +       char *using_ext;
> +
> +       if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
> +           IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {
> +               using_ext = "using Zabha";
> +
> +               asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0, RISCV_ISA_EXT_ZACAS, 1)
> +                        : : : : no_zacas);
> +               asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0, RISCV_ISA_EXT_ZABHA, 1)
> +                        : : : : qspinlock);
> +       }
I'm okay with this patch.
I suggest adding an argument such as "enable_qspinlock" that people
could use on non-Zabha machines. I hope that can happen in this
series; that's all I need, thank you very much.
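
For illustration, such an override could be a small early_param() that
keeps the combo spinlock on the queued implementation; a minimal sketch,
assuming the qspinlock_key from this patch and a hypothetical
"enable_qspinlock" parameter name (this is not code from the series):

	/* Hypothetical sketch: force qspinlocks from the kernel command
	 * line. The parameter name and the wiring into
	 * riscv_spinlock_init() are assumptions, not part of this series.
	 */
	static bool force_qspinlock __initdata;

	static int __init enable_qspinlock_setup(char *p)
	{
		force_qspinlock = true;
		return 0;
	}
	early_param("enable_qspinlock", enable_qspinlock_setup);

riscv_spinlock_init() would then branch straight to the qspinlock path
(and skip static_branch_disable(&qspinlock_key)) whenever
force_qspinlock is set, regardless of the detected extensions.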

> +
> +no_zacas:
> +       using_ext = "using Ziccrse";
> +       asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0,
> +                            RISCV_ISA_EXT_ZICCRSE, 1)
> +                : : : : qspinlock);
> +
> +       static_branch_disable(&qspinlock_key);
> +       pr_info("Ticket spinlock: enabled\n");
> +
> +       return;
> +
> +qspinlock:
> +       pr_info("Queued spinlock %s: enabled\n", using_ext);
> +}
> +
>  extern void __init init_rt_signal_env(void);
>
>  void __init setup_arch(char **cmdline_p)
> @@ -295,6 +327,7 @@ void __init setup_arch(char **cmdline_p)
>         riscv_set_dma_cache_alignment();
>
>         riscv_user_isa_enable();
> +       riscv_spinlock_init();
>  }
>
>  bool arch_cpu_is_hotpluggable(int cpu)
> diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> index 0655aa5b57b2..bf47cca2c375 100644
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
>  }
>  #endif
>
> +#ifndef __no_arch_spinlock_redefine
>  /*
>   * Remapping spinlock architecture specific functions to the corresponding
>   * queued spinlock functions.
> @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
>  #define arch_spin_lock(l)              queued_spin_lock(l)
>  #define arch_spin_trylock(l)           queued_spin_trylock(l)
>  #define arch_spin_unlock(l)            queued_spin_unlock(l)
> +#endif
>
>  #endif /* __ASM_GENERIC_QSPINLOCK_H */
> diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h
> index cfcff22b37b3..325779970d8a 100644
> --- a/include/asm-generic/ticket_spinlock.h
> +++ b/include/asm-generic/ticket_spinlock.h
> @@ -89,6 +89,7 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
>         return (s16)((val >> 16) - (val & 0xffff)) > 1;
>  }
>
> +#ifndef __no_arch_spinlock_redefine
>  /*
>   * Remapping spinlock architecture specific functions to the corresponding
>   * ticket spinlock functions.
> @@ -99,5 +100,6 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
>  #define arch_spin_lock(l)              ticket_spin_lock(l)
>  #define arch_spin_trylock(l)           ticket_spin_trylock(l)
>  #define arch_spin_unlock(l)            ticket_spin_unlock(l)
> +#endif
>
>  #endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */
> --
> 2.39.2
>
Andrea Parri July 17, 2024, 4:29 p.m. UTC | #2
> +config RISCV_QUEUED_SPINLOCKS

I'm seeing the following warnings with CONFIG_RISCV_QUEUED_SPINLOCKS=y:

In file included from ./arch/riscv/include/generated/asm/qspinlock.h:1,
                 from kernel/locking/qspinlock.c:24:
./include/asm-generic/qspinlock.h:144:9: warning: "arch_spin_is_locked" redefined
  144 | #define arch_spin_is_locked(l)          queued_spin_is_locked(l)
      |         ^~~~~~~~~~~~~~~~~~~
In file included from ./arch/riscv/include/generated/asm/ticket_spinlock.h:1,
                 from ./arch/riscv/include/asm/spinlock.h:33,
                 from ./include/linux/spinlock.h:95,
                 from ./include/linux/sched.h:2142,
                 from ./include/linux/percpu.h:13,
                 from kernel/locking/qspinlock.c:19:
./include/asm-generic/ticket_spinlock.h:97:9: note: this is the location of the previous definition
   97 | #define arch_spin_is_locked(l)          ticket_spin_is_locked(l)
      |         ^~~~~~~~~~~~~~~~~~~
./include/asm-generic/qspinlock.h:145:9: warning: "arch_spin_is_contended" redefined
  145 | #define arch_spin_is_contended(l)       queued_spin_is_contended(l)
      |         ^~~~~~~~~~~~~~~~~~~~~~
./include/asm-generic/ticket_spinlock.h:98:9: note: this is the location of the previous definition
   98 | #define arch_spin_is_contended(l)       ticket_spin_is_contended(l)
      |         ^~~~~~~~~~~~~~~~~~~~~~
./include/asm-generic/qspinlock.h:146:9: warning: "arch_spin_value_unlocked" redefined
  146 | #define arch_spin_value_unlocked(l)     queued_spin_value_unlocked(l)
      |         ^~~~~~~~~~~~~~~~~~~~~~~~
./include/asm-generic/ticket_spinlock.h:99:9: note: this is the location of the previous definition
   99 | #define arch_spin_value_unlocked(l)     ticket_spin_value_unlocked(l)
      |         ^~~~~~~~~~~~~~~~~~~~~~~~
./include/asm-generic/qspinlock.h:147:9: warning: "arch_spin_lock" redefined
  147 | #define arch_spin_lock(l)               queued_spin_lock(l)
      |         ^~~~~~~~~~~~~~
./include/asm-generic/ticket_spinlock.h:100:9: note: this is the location of the previous definition
  100 | #define arch_spin_lock(l)               ticket_spin_lock(l)
      |         ^~~~~~~~~~~~~~
./include/asm-generic/qspinlock.h:148:9: warning: "arch_spin_trylock" redefined
  148 | #define arch_spin_trylock(l)            queued_spin_trylock(l)
      |         ^~~~~~~~~~~~~~~~~
./include/asm-generic/ticket_spinlock.h:101:9: note: this is the location of the previous definition
  101 | #define arch_spin_trylock(l)            ticket_spin_trylock(l)
      |         ^~~~~~~~~~~~~~~~~
./include/asm-generic/qspinlock.h:149:9: warning: "arch_spin_unlock" redefined
  149 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
      |         ^~~~~~~~~~~~~~~~
./include/asm-generic/ticket_spinlock.h:102:9: note: this is the location of the previous definition
  102 | #define arch_spin_unlock(l)             ticket_spin_unlock(l)


The following diff resolves them for me (please double check):

diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
index 4856d50006f28..2d59f56a9e2d1 100644
--- a/arch/riscv/include/asm/spinlock.h
+++ b/arch/riscv/include/asm/spinlock.h
@@ -30,7 +30,11 @@ SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
 
 #else
 
+#if defined(CONFIG_RISCV_TICKET_SPINLOCKS)
 #include <asm/ticket_spinlock.h>
+#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS)
+#include <asm/qspinlock.h>
+#endif
 
 #endif


> +DEFINE_STATIC_KEY_TRUE(qspinlock_key);
> +EXPORT_SYMBOL(qspinlock_key);
> +
> +static void __init riscv_spinlock_init(void)
> +{
> +	char *using_ext;
> +
> +	if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
> +	    IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {
> +		using_ext = "using Zabha";
> +
> +		asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0, RISCV_ISA_EXT_ZACAS, 1)
> +			 : : : : no_zacas);
> +		asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0, RISCV_ISA_EXT_ZABHA, 1)
> +			 : : : : qspinlock);
> +	}
> +
> +no_zacas:
> +	using_ext = "using Ziccrse";
> +	asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0,
> +			     RISCV_ISA_EXT_ZICCRSE, 1)
> +		 : : : : qspinlock);
> +
> +	static_branch_disable(&qspinlock_key);
> +	pr_info("Ticket spinlock: enabled\n");
> +
> +	return;
> +
> +qspinlock:
> +	pr_info("Queued spinlock %s: enabled\n", using_ext);
> +}
> +

Your commit message suggests that riscv_spinlock_init() doesn't need to
do anything if CONFIG_RISCV_COMBO_SPINLOCKS=n:

diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index d7c31c9b8ead2..b2be1b0b700d2 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -244,6 +244,7 @@ static void __init parse_dtb(void)
 #endif
 }
 
+#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
 DEFINE_STATIC_KEY_TRUE(qspinlock_key);
 EXPORT_SYMBOL(qspinlock_key);
 
@@ -275,6 +276,11 @@ static void __init riscv_spinlock_init(void)
 qspinlock:
 	pr_info("Queued spinlock %s: enabled\n", using_ext);
 }
+#else
+static void __init riscv_spinlock_init(void)
+{
+}
+#endif
 
 extern void __init init_rt_signal_env(void);


Makes sense?  What am I missing?

  Andrea
Alexandre Ghiti July 18, 2024, 1:08 p.m. UTC | #3
Hi Andrea,

On 17/07/2024 18:29, Andrea Parri wrote:
>> +config RISCV_QUEUED_SPINLOCKS
> I'm seeing the following warnings with CONFIG_RISCV_QUEUED_SPINLOCKS=y:
>
> In file included from ./arch/riscv/include/generated/asm/qspinlock.h:1,
>                   from kernel/locking/qspinlock.c:24:
> ./include/asm-generic/qspinlock.h:144:9: warning: "arch_spin_is_locked" redefined
>    144 | #define arch_spin_is_locked(l)          queued_spin_is_locked(l)
>        |         ^~~~~~~~~~~~~~~~~~~
> In file included from ./arch/riscv/include/generated/asm/ticket_spinlock.h:1,
>                   from ./arch/riscv/include/asm/spinlock.h:33,
>                   from ./include/linux/spinlock.h:95,
>                   from ./include/linux/sched.h:2142,
>                   from ./include/linux/percpu.h:13,
>                   from kernel/locking/qspinlock.c:19:
> ./include/asm-generic/ticket_spinlock.h:97:9: note: this is the location of the previous definition
>     97 | #define arch_spin_is_locked(l)          ticket_spin_is_locked(l)
>        |         ^~~~~~~~~~~~~~~~~~~
> ./include/asm-generic/qspinlock.h:145:9: warning: "arch_spin_is_contended" redefined
>    145 | #define arch_spin_is_contended(l)       queued_spin_is_contended(l)
>        |         ^~~~~~~~~~~~~~~~~~~~~~
> ./include/asm-generic/ticket_spinlock.h:98:9: note: this is the location of the previous definition
>     98 | #define arch_spin_is_contended(l)       ticket_spin_is_contended(l)
>        |         ^~~~~~~~~~~~~~~~~~~~~~
> ./include/asm-generic/qspinlock.h:146:9: warning: "arch_spin_value_unlocked" redefined
>    146 | #define arch_spin_value_unlocked(l)     queued_spin_value_unlocked(l)
>        |         ^~~~~~~~~~~~~~~~~~~~~~~~
> ./include/asm-generic/ticket_spinlock.h:99:9: note: this is the location of the previous definition
>     99 | #define arch_spin_value_unlocked(l)     ticket_spin_value_unlocked(l)
>        |         ^~~~~~~~~~~~~~~~~~~~~~~~
> ./include/asm-generic/qspinlock.h:147:9: warning: "arch_spin_lock" redefined
>    147 | #define arch_spin_lock(l)               queued_spin_lock(l)
>        |         ^~~~~~~~~~~~~~
> ./include/asm-generic/ticket_spinlock.h:100:9: note: this is the location of the previous definition
>    100 | #define arch_spin_lock(l)               ticket_spin_lock(l)
>        |         ^~~~~~~~~~~~~~
> ./include/asm-generic/qspinlock.h:148:9: warning: "arch_spin_trylock" redefined
>    148 | #define arch_spin_trylock(l)            queued_spin_trylock(l)
>        |         ^~~~~~~~~~~~~~~~~
> ./include/asm-generic/ticket_spinlock.h:101:9: note: this is the location of the previous definition
>    101 | #define arch_spin_trylock(l)            ticket_spin_trylock(l)
>        |         ^~~~~~~~~~~~~~~~~
> ./include/asm-generic/qspinlock.h:149:9: warning: "arch_spin_unlock" redefined
>    149 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
>        |         ^~~~~~~~~~~~~~~~
> ./include/asm-generic/ticket_spinlock.h:102:9: note: this is the location of the previous definition
>    102 | #define arch_spin_unlock(l)             ticket_spin_unlock(l)
>
>
> The following diff resolves them for me (please double check):
>
> diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
> index 4856d50006f28..2d59f56a9e2d1 100644
> --- a/arch/riscv/include/asm/spinlock.h
> +++ b/arch/riscv/include/asm/spinlock.h
> @@ -30,7 +30,11 @@ SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
>   
>   #else
>   
> +#if defined(CONFIG_RISCV_TICKET_SPINLOCKS)
>   #include <asm/ticket_spinlock.h>
> +#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS)
> +#include <asm/qspinlock.h>
> +#endif
>   
>   #endif


Thanks for testing this config (when I did not...)!

I came up with something slightly different, but it's the same fix in
the end, thanks!


>
>> +DEFINE_STATIC_KEY_TRUE(qspinlock_key);
>> +EXPORT_SYMBOL(qspinlock_key);
>> +
>> +static void __init riscv_spinlock_init(void)
>> +{
>> +	char *using_ext;
>> +
>> +	if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
>> +	    IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {
>> +		using_ext = "using Zabha";
>> +
>> +		asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0, RISCV_ISA_EXT_ZACAS, 1)
>> +			 : : : : no_zacas);
>> +		asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0, RISCV_ISA_EXT_ZABHA, 1)
>> +			 : : : : qspinlock);
>> +	}
>> +
>> +no_zacas:
>> +	using_ext = "using Ziccrse";
>> +	asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0,
>> +			     RISCV_ISA_EXT_ZICCRSE, 1)
>> +		 : : : : qspinlock);
>> +
>> +	static_branch_disable(&qspinlock_key);
>> +	pr_info("Ticket spinlock: enabled\n");
>> +
>> +	return;
>> +
>> +qspinlock:
>> +	pr_info("Queued spinlock %s: enabled\n", using_ext);
>> +}
>> +
> Your commit message suggests that riscv_spinlock_init() doesn't need to
> do anything if CONFIG_RISCV_COMBO_SPINLOCKS=n:
>
> diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> index d7c31c9b8ead2..b2be1b0b700d2 100644
> --- a/arch/riscv/kernel/setup.c
> +++ b/arch/riscv/kernel/setup.c
> @@ -244,6 +244,7 @@ static void __init parse_dtb(void)
>   #endif
>   }
>   
> +#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
>   DEFINE_STATIC_KEY_TRUE(qspinlock_key);
>   EXPORT_SYMBOL(qspinlock_key);
>   
> @@ -275,6 +276,11 @@ static void __init riscv_spinlock_init(void)
>   qspinlock:
>   	pr_info("Queued spinlock %s: enabled\n", using_ext);
>   }
> +#else
> +static void __init riscv_spinlock_init(void)
> +{
> +}
> +#endif
>   
>   extern void __init init_rt_signal_env(void);
>
>
> Makes sense?  What am I missing?


Totally makes sense; I completely overlooked this when I added the
ticket/queued configs. Thanks for taking the time to look into it.

That will be fixed in the next version.

Thanks again,

Alex


>
>    Andrea
>
Alexandre Ghiti July 18, 2024, 1:11 p.m. UTC | #4
Hi Guo,

On 17/07/2024 11:30, Guo Ren wrote:
> On Wed, Jul 17, 2024 at 2:31 PM Alexandre Ghiti <alexghiti@rivosinc.com> wrote:
>> In order to produce a generic kernel, a user can select
>> CONFIG_RISCV_COMBO_SPINLOCKS, which will fall back at runtime to the
>> ticket spinlock implementation if neither Zabha nor Ziccrse is present.
>>
>> Note that we can't use alternatives here: the discovery of extensions
>> is done too late, and we need to start with the qspinlock
>> implementation because the ticket spinlock implementation would
>> pollute the spinlock value, so let's use static keys instead.
>>
>> This is largely based on Guo's work and Leonardo's reviews at [1].
>>
>> Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@kernel.org/ [1]
>> Signed-off-by: Guo Ren <guoren@kernel.org>
>> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
>> ---
>>   .../locking/queued-spinlocks/arch-support.txt |  2 +-
>>   arch/riscv/Kconfig                            | 29 ++++++++++++++
>>   arch/riscv/include/asm/Kbuild                 |  4 +-
>>   arch/riscv/include/asm/spinlock.h             | 39 +++++++++++++++++++
>>   arch/riscv/kernel/setup.c                     | 33 ++++++++++++++++
>>   include/asm-generic/qspinlock.h               |  2 +
>>   include/asm-generic/ticket_spinlock.h         |  2 +
>>   7 files changed, 109 insertions(+), 2 deletions(-)
>>   create mode 100644 arch/riscv/include/asm/spinlock.h
>>
>> diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
>> index 22f2990392ff..cf26042480e2 100644
>> --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
>> +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
>> @@ -20,7 +20,7 @@
>>       |    openrisc: |  ok  |
>>       |      parisc: | TODO |
>>       |     powerpc: |  ok  |
>> -    |       riscv: | TODO |
>> +    |       riscv: |  ok  |
>>       |        s390: | TODO |
>>       |          sh: | TODO |
>>       |       sparc: |  ok  |
>> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
>> index 0bbaec0444d0..5040c7eac70d 100644
>> --- a/arch/riscv/Kconfig
>> +++ b/arch/riscv/Kconfig
>> @@ -72,6 +72,7 @@ config RISCV
>>          select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
>>          select ARCH_WANTS_NO_INSTR
>>          select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
>> +       select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
>>          select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
>>          select BUILDTIME_TABLE_SORT if MMU
>>          select CLINT_TIMER if RISCV_M_MODE
>> @@ -482,6 +483,34 @@ config NODES_SHIFT
>>            Specify the maximum number of NUMA Nodes available on the target
>>            system.  Increases memory reserved to accommodate various tables.
>>
>> +choice
>> +       prompt "RISC-V spinlock type"
>> +       default RISCV_COMBO_SPINLOCKS
>> +
>> +config RISCV_TICKET_SPINLOCKS
>> +       bool "Using ticket spinlock"
>> +
>> +config RISCV_QUEUED_SPINLOCKS
>> +       bool "Using queued spinlock"
>> +       depends on SMP && MMU
>> +       select ARCH_USE_QUEUED_SPINLOCKS
>> +       help
>> +         The queued spinlock implementation requires the forward progress
>> +         guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
>> +         LR/SC with Ziccrse provide such guarantee.
>> +
>> +         Select this if and only if Zabha or Ziccrse is available on your
>> +         platform.
>> +
>> +config RISCV_COMBO_SPINLOCKS
>> +       bool "Using combo spinlock"
>> +       depends on SMP && MMU
>> +       select ARCH_USE_QUEUED_SPINLOCKS
>> +       help
>> +         Embed both queued spinlock and ticket lock so that the spinlock
>> +         implementation can be chosen at runtime.
>> +endchoice
>> +
>>   config RISCV_ALTERNATIVE
>>          bool
>>          depends on !XIP_KERNEL
>> diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
>> index 504f8b7e72d4..ad72f2bd4cc9 100644
>> --- a/arch/riscv/include/asm/Kbuild
>> +++ b/arch/riscv/include/asm/Kbuild
>> @@ -2,10 +2,12 @@
>>   generic-y += early_ioremap.h
>>   generic-y += flat.h
>>   generic-y += kvm_para.h
>> +generic-y += mcs_spinlock.h
>>   generic-y += parport.h
>> -generic-y += spinlock.h
>>   generic-y += spinlock_types.h
>> +generic-y += ticket_spinlock.h
>>   generic-y += qrwlock.h
>>   generic-y += qrwlock_types.h
>> +generic-y += qspinlock.h
>>   generic-y += user.h
>>   generic-y += vmlinux.lds.h
>> diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
>> new file mode 100644
>> index 000000000000..4856d50006f2
>> --- /dev/null
>> +++ b/arch/riscv/include/asm/spinlock.h
>> @@ -0,0 +1,39 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +
>> +#ifndef __ASM_RISCV_SPINLOCK_H
>> +#define __ASM_RISCV_SPINLOCK_H
>> +
>> +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
>> +#define _Q_PENDING_LOOPS       (1 << 9)
>> +
>> +#define __no_arch_spinlock_redefine
>> +#include <asm/ticket_spinlock.h>
>> +#include <asm/qspinlock.h>
>> +#include <asm/alternative.h>
>> +
>> +DECLARE_STATIC_KEY_TRUE(qspinlock_key);
>> +
>> +#define SPINLOCK_BASE_DECLARE(op, type, type_lock)                     \
>> +static __always_inline type arch_spin_##op(type_lock lock)             \
>> +{                                                                      \
>> +       if (static_branch_unlikely(&qspinlock_key))                     \
>> +               return queued_spin_##op(lock);                          \
>> +       return ticket_spin_##op(lock);                                  \
>> +}
>> +
>> +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *)
>> +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *)
>> +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *)
>> +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *)
>> +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *)
>> +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
>> +
>> +#else
>> +
>> +#include <asm/ticket_spinlock.h>
>> +
>> +#endif
>> +
>> +#include <asm/qrwlock.h>
>> +
>> +#endif /* __ASM_RISCV_SPINLOCK_H */
>> diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
>> index 4f73c0ae44b2..d7c31c9b8ead 100644
>> --- a/arch/riscv/kernel/setup.c
>> +++ b/arch/riscv/kernel/setup.c
>> @@ -244,6 +244,38 @@ static void __init parse_dtb(void)
>>   #endif
>>   }
>>
>> +DEFINE_STATIC_KEY_TRUE(qspinlock_key);
>> +EXPORT_SYMBOL(qspinlock_key);
>> +
>> +static void __init riscv_spinlock_init(void)
>> +{
>> +       char *using_ext;
>> +
>> +       if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
>> +           IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {
>> +               using_ext = "using Zabha";
>> +
>> +               asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0, RISCV_ISA_EXT_ZACAS, 1)
>> +                        : : : : no_zacas);
>> +               asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0, RISCV_ISA_EXT_ZABHA, 1)
>> +                        : : : : qspinlock);
>> +       }
> I'm okay with this patch.


Great, thanks!


> I suggest adding an argument such as "enable_qspinlock" that people
> could use on non-Zabha machines. I hope that can happen in this
> series; that's all I need, thank you very much.


Do you think that's really necessary? I added Ziccrse support just
below; to me, that fits your need to use qspinlocks on !Zabha platforms.

BTW, can I add your SoB to this patch? Or a Co-developed-by or anything
else to show that you greatly contributed to it?

Thanks,

Alex


>
>> +
>> +no_zacas:
>> +       using_ext = "using Ziccrse";
>> +       asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0,
>> +                            RISCV_ISA_EXT_ZICCRSE, 1)
>> +                : : : : qspinlock);
>> +
>> +       static_branch_disable(&qspinlock_key);
>> +       pr_info("Ticket spinlock: enabled\n");
>> +
>> +       return;
>> +
>> +qspinlock:
>> +       pr_info("Queued spinlock %s: enabled\n", using_ext);
>> +}
>> +
>>   extern void __init init_rt_signal_env(void);
>>
>>   void __init setup_arch(char **cmdline_p)
>> @@ -295,6 +327,7 @@ void __init setup_arch(char **cmdline_p)
>>          riscv_set_dma_cache_alignment();
>>
>>          riscv_user_isa_enable();
>> +       riscv_spinlock_init();
>>   }
>>
>>   bool arch_cpu_is_hotpluggable(int cpu)
>> diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
>> index 0655aa5b57b2..bf47cca2c375 100644
>> --- a/include/asm-generic/qspinlock.h
>> +++ b/include/asm-generic/qspinlock.h
>> @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
>>   }
>>   #endif
>>
>> +#ifndef __no_arch_spinlock_redefine
>>   /*
>>    * Remapping spinlock architecture specific functions to the corresponding
>>    * queued spinlock functions.
>> @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
>>   #define arch_spin_lock(l)              queued_spin_lock(l)
>>   #define arch_spin_trylock(l)           queued_spin_trylock(l)
>>   #define arch_spin_unlock(l)            queued_spin_unlock(l)
>> +#endif
>>
>>   #endif /* __ASM_GENERIC_QSPINLOCK_H */
>> diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h
>> index cfcff22b37b3..325779970d8a 100644
>> --- a/include/asm-generic/ticket_spinlock.h
>> +++ b/include/asm-generic/ticket_spinlock.h
>> @@ -89,6 +89,7 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
>>          return (s16)((val >> 16) - (val & 0xffff)) > 1;
>>   }
>>
>> +#ifndef __no_arch_spinlock_redefine
>>   /*
>>    * Remapping spinlock architecture specific functions to the corresponding
>>    * ticket spinlock functions.
>> @@ -99,5 +100,6 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
>>   #define arch_spin_lock(l)              ticket_spin_lock(l)
>>   #define arch_spin_trylock(l)           ticket_spin_trylock(l)
>>   #define arch_spin_unlock(l)            ticket_spin_unlock(l)
>> +#endif
>>
>>   #endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */
>> --
>> 2.39.2
>>
>
Samuel Holland July 19, 2024, 1:05 a.m. UTC | #5
Hi Alex,

On 2024-07-17 1:19 AM, Alexandre Ghiti wrote:
> In order to produce a generic kernel, a user can select
> CONFIG_RISCV_COMBO_SPINLOCKS, which will fall back at runtime to the
> ticket spinlock implementation if neither Zabha nor Ziccrse is present.
>
> Note that we can't use alternatives here: the discovery of extensions
> is done too late, and we need to start with the qspinlock
> implementation because the ticket spinlock implementation would
> pollute the spinlock value, so let's use static keys instead.
>
> This is largely based on Guo's work and Leonardo's reviews at [1].
> 
> Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@kernel.org/ [1]
> Signed-off-by: Guo Ren <guoren@kernel.org>
> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
> ---
>  .../locking/queued-spinlocks/arch-support.txt |  2 +-
>  arch/riscv/Kconfig                            | 29 ++++++++++++++
>  arch/riscv/include/asm/Kbuild                 |  4 +-
>  arch/riscv/include/asm/spinlock.h             | 39 +++++++++++++++++++
>  arch/riscv/kernel/setup.c                     | 33 ++++++++++++++++
>  include/asm-generic/qspinlock.h               |  2 +
>  include/asm-generic/ticket_spinlock.h         |  2 +
>  7 files changed, 109 insertions(+), 2 deletions(-)
>  create mode 100644 arch/riscv/include/asm/spinlock.h
> 
> diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
> index 22f2990392ff..cf26042480e2 100644
> --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
> +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
> @@ -20,7 +20,7 @@
>      |    openrisc: |  ok  |
>      |      parisc: | TODO |
>      |     powerpc: |  ok  |
> -    |       riscv: | TODO |
> +    |       riscv: |  ok  |
>      |        s390: | TODO |
>      |          sh: | TODO |
>      |       sparc: |  ok  |
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index 0bbaec0444d0..5040c7eac70d 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -72,6 +72,7 @@ config RISCV
>  	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
>  	select ARCH_WANTS_NO_INSTR
>  	select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
> +	select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
>  	select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
>  	select BUILDTIME_TABLE_SORT if MMU
>  	select CLINT_TIMER if RISCV_M_MODE
> @@ -482,6 +483,34 @@ config NODES_SHIFT
>  	  Specify the maximum number of NUMA Nodes available on the target
>  	  system.  Increases memory reserved to accommodate various tables.
>  
> +choice
> +	prompt "RISC-V spinlock type"
> +	default RISCV_COMBO_SPINLOCKS
> +
> +config RISCV_TICKET_SPINLOCKS
> +	bool "Using ticket spinlock"
> +
> +config RISCV_QUEUED_SPINLOCKS
> +	bool "Using queued spinlock"
> +	depends on SMP && MMU

This needs:

	depends on NONPORTABLE

> +	select ARCH_USE_QUEUED_SPINLOCKS
> +	help
> +	  The queued spinlock implementation requires the forward progress
> +	  guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
> +	  LR/SC with Ziccrse provide such guarantee.
> +
> +	  Select this if and only if Zabha or Ziccrse is available on your
> +	  platform.
> +
> +config RISCV_COMBO_SPINLOCKS
> +	bool "Using combo spinlock"
> +	depends on SMP && MMU
> +	select ARCH_USE_QUEUED_SPINLOCKS
> +	help
> +	  Embed both queued spinlock and ticket lock so that the spinlock
> +	  implementation can be chosen at runtime.
> +endchoice
> +
>  config RISCV_ALTERNATIVE
>  	bool
>  	depends on !XIP_KERNEL
> diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
> index 504f8b7e72d4..ad72f2bd4cc9 100644
> --- a/arch/riscv/include/asm/Kbuild
> +++ b/arch/riscv/include/asm/Kbuild
> @@ -2,10 +2,12 @@
>  generic-y += early_ioremap.h
>  generic-y += flat.h
>  generic-y += kvm_para.h
> +generic-y += mcs_spinlock.h
>  generic-y += parport.h
> -generic-y += spinlock.h
>  generic-y += spinlock_types.h
> +generic-y += ticket_spinlock.h
>  generic-y += qrwlock.h
>  generic-y += qrwlock_types.h
> +generic-y += qspinlock.h
>  generic-y += user.h
>  generic-y += vmlinux.lds.h
> diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
> new file mode 100644
> index 000000000000..4856d50006f2
> --- /dev/null
> +++ b/arch/riscv/include/asm/spinlock.h
> @@ -0,0 +1,39 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef __ASM_RISCV_SPINLOCK_H
> +#define __ASM_RISCV_SPINLOCK_H
> +
> +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
> +#define _Q_PENDING_LOOPS	(1 << 9)
> +
> +#define __no_arch_spinlock_redefine
> +#include <asm/ticket_spinlock.h>
> +#include <asm/qspinlock.h>
> +#include <asm/alternative.h>
> +
> +DECLARE_STATIC_KEY_TRUE(qspinlock_key);
> +
> +#define SPINLOCK_BASE_DECLARE(op, type, type_lock)			\
> +static __always_inline type arch_spin_##op(type_lock lock)		\
> +{									\
> +	if (static_branch_unlikely(&qspinlock_key))			\
> +		return queued_spin_##op(lock);				\
> +	return ticket_spin_##op(lock);					\
> +}
> +
> +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
> +
> +#else
> +
> +#include <asm/ticket_spinlock.h>
> +
> +#endif
> +
> +#include <asm/qrwlock.h>
> +
> +#endif /* __ASM_RISCV_SPINLOCK_H */
> diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> index 4f73c0ae44b2..d7c31c9b8ead 100644
> --- a/arch/riscv/kernel/setup.c
> +++ b/arch/riscv/kernel/setup.c
> @@ -244,6 +244,38 @@ static void __init parse_dtb(void)
>  #endif
>  }
>  
> +DEFINE_STATIC_KEY_TRUE(qspinlock_key);
> +EXPORT_SYMBOL(qspinlock_key);
> +
> +static void __init riscv_spinlock_init(void)
> +{
> +	char *using_ext;
> +
> +	if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
> +	    IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {
> +		using_ext = "using Zabha";
> +
> +		asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0, RISCV_ISA_EXT_ZACAS, 1)
> +			 : : : : no_zacas);
> +		asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0, RISCV_ISA_EXT_ZABHA, 1)
> +			 : : : : qspinlock);
> +	}
> +
> +no_zacas:
> +	using_ext = "using Ziccrse";
> +	asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0,
> +			     RISCV_ISA_EXT_ZICCRSE, 1)
> +		 : : : : qspinlock);
> +
> +	static_branch_disable(&qspinlock_key);
> +	pr_info("Ticket spinlock: enabled\n");
> +
> +	return;
> +
> +qspinlock:
> +	pr_info("Queued spinlock %s: enabled\n", using_ext);
> +}

This function would be much easier to read with
riscv_has_extension_[un]likely(), or even riscv_isa_extension_available() since
it only gets called once. Thankfully the concerns about using those inside
macros don't apply here :)
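
For example, something along these lines (an untested sketch: it assumes
riscv_isa_extension_available() works with the Zacas/Zabha/Ziccrse
extension ids from this series, and it is not necessarily the code that
will land):

	static void __init riscv_spinlock_init(void)
	{
		char *using_ext = NULL;

		/* The Zabha-based qspinlock needs both Zacas and Zabha. */
		if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
		    IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) &&
		    riscv_isa_extension_available(NULL, ZACAS) &&
		    riscv_isa_extension_available(NULL, ZABHA))
			using_ext = "using Zabha";
		else if (riscv_isa_extension_available(NULL, ZICCRSE))
			using_ext = "using Ziccrse";

		if (!using_ext) {
			static_branch_disable(&qspinlock_key);
			pr_info("Ticket spinlock: enabled\n");
			return;
		}

		pr_info("Queued spinlock %s: enabled\n", using_ext);
	}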

Regards,
Samuel

> +
>  extern void __init init_rt_signal_env(void);
>  
>  void __init setup_arch(char **cmdline_p)
> @@ -295,6 +327,7 @@ void __init setup_arch(char **cmdline_p)
>  	riscv_set_dma_cache_alignment();
>  
>  	riscv_user_isa_enable();
> +	riscv_spinlock_init();
>  }
>  
>  bool arch_cpu_is_hotpluggable(int cpu)
> diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> index 0655aa5b57b2..bf47cca2c375 100644
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
>  }
>  #endif
>  
> +#ifndef __no_arch_spinlock_redefine
>  /*
>   * Remapping spinlock architecture specific functions to the corresponding
>   * queued spinlock functions.
> @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
>  #define arch_spin_lock(l)		queued_spin_lock(l)
>  #define arch_spin_trylock(l)		queued_spin_trylock(l)
>  #define arch_spin_unlock(l)		queued_spin_unlock(l)
> +#endif
>  
>  #endif /* __ASM_GENERIC_QSPINLOCK_H */
> diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h
> index cfcff22b37b3..325779970d8a 100644
> --- a/include/asm-generic/ticket_spinlock.h
> +++ b/include/asm-generic/ticket_spinlock.h
> @@ -89,6 +89,7 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
>  	return (s16)((val >> 16) - (val & 0xffff)) > 1;
>  }
>  
> +#ifndef __no_arch_spinlock_redefine
>  /*
>   * Remapping spinlock architecture specific functions to the corresponding
>   * ticket spinlock functions.
> @@ -99,5 +100,6 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
>  #define arch_spin_lock(l)		ticket_spin_lock(l)
>  #define arch_spin_trylock(l)		ticket_spin_trylock(l)
>  #define arch_spin_unlock(l)		ticket_spin_unlock(l)
> +#endif
>  
>  #endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */
Alexandre Ghiti July 19, 2024, 9:06 a.m. UTC | #6
Hi Samuel,

On Fri, Jul 19, 2024 at 3:05 AM Samuel Holland
<samuel.holland@sifive.com> wrote:
>
> Hi Alex,
>
> On 2024-07-17 1:19 AM, Alexandre Ghiti wrote:
> > In order to produce a generic kernel, a user can select
> > CONFIG_RISCV_COMBO_SPINLOCKS, which will fall back at runtime to the
> > ticket spinlock implementation if neither Zabha nor Ziccrse is present.
> >
> > Note that we can't use alternatives here: the discovery of extensions
> > is done too late, and we need to start with the qspinlock
> > implementation because the ticket spinlock implementation would
> > pollute the spinlock value, so let's use static keys instead.
> >
> > This is largely based on Guo's work and Leonardo's reviews at [1].
> >
> > Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@kernel.org/ [1]
> > Signed-off-by: Guo Ren <guoren@kernel.org>
> > Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
> > ---
> >  .../locking/queued-spinlocks/arch-support.txt |  2 +-
> >  arch/riscv/Kconfig                            | 29 ++++++++++++++
> >  arch/riscv/include/asm/Kbuild                 |  4 +-
> >  arch/riscv/include/asm/spinlock.h             | 39 +++++++++++++++++++
> >  arch/riscv/kernel/setup.c                     | 33 ++++++++++++++++
> >  include/asm-generic/qspinlock.h               |  2 +
> >  include/asm-generic/ticket_spinlock.h         |  2 +
> >  7 files changed, 109 insertions(+), 2 deletions(-)
> >  create mode 100644 arch/riscv/include/asm/spinlock.h
> >
> > diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
> > index 22f2990392ff..cf26042480e2 100644
> > --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
> > +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
> > @@ -20,7 +20,7 @@
> >      |    openrisc: |  ok  |
> >      |      parisc: | TODO |
> >      |     powerpc: |  ok  |
> > -    |       riscv: | TODO |
> > +    |       riscv: |  ok  |
> >      |        s390: | TODO |
> >      |          sh: | TODO |
> >      |       sparc: |  ok  |
> > diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> > index 0bbaec0444d0..5040c7eac70d 100644
> > --- a/arch/riscv/Kconfig
> > +++ b/arch/riscv/Kconfig
> > @@ -72,6 +72,7 @@ config RISCV
> >       select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
> >       select ARCH_WANTS_NO_INSTR
> >       select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
> > +     select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
> >       select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
> >       select BUILDTIME_TABLE_SORT if MMU
> >       select CLINT_TIMER if RISCV_M_MODE
> > @@ -482,6 +483,34 @@ config NODES_SHIFT
> >         Specify the maximum number of NUMA Nodes available on the target
> >         system.  Increases memory reserved to accommodate various tables.
> >
> > +choice
> > +     prompt "RISC-V spinlock type"
> > +     default RISCV_COMBO_SPINLOCKS
> > +
> > +config RISCV_TICKET_SPINLOCKS
> > +     bool "Using ticket spinlock"
> > +
> > +config RISCV_QUEUED_SPINLOCKS
> > +     bool "Using queued spinlock"
> > +     depends on SMP && MMU
>
> This needs:
>
>         depends on NONPORTABLE

Nice, thanks

>
> > +     select ARCH_USE_QUEUED_SPINLOCKS
> > +     help
> > +       The queued spinlock implementation requires the forward progress
> > +       guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
> > +       LR/SC with Ziccrse provide such guarantee.
> > +
> > +       Select this if and only if Zabha or Ziccrse is available on your
> > +       platform.
> > +
> > +config RISCV_COMBO_SPINLOCKS
> > +     bool "Using combo spinlock"
> > +     depends on SMP && MMU
> > +     select ARCH_USE_QUEUED_SPINLOCKS
> > +     help
> > +       Embed both queued spinlock and ticket lock so that the spinlock
> > +       implementation can be chosen at runtime.
> > +endchoice
> > +
> >  config RISCV_ALTERNATIVE
> >       bool
> >       depends on !XIP_KERNEL
> > diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
> > index 504f8b7e72d4..ad72f2bd4cc9 100644
> > --- a/arch/riscv/include/asm/Kbuild
> > +++ b/arch/riscv/include/asm/Kbuild
> > @@ -2,10 +2,12 @@
> >  generic-y += early_ioremap.h
> >  generic-y += flat.h
> >  generic-y += kvm_para.h
> > +generic-y += mcs_spinlock.h
> >  generic-y += parport.h
> > -generic-y += spinlock.h
> >  generic-y += spinlock_types.h
> > +generic-y += ticket_spinlock.h
> >  generic-y += qrwlock.h
> >  generic-y += qrwlock_types.h
> > +generic-y += qspinlock.h
> >  generic-y += user.h
> >  generic-y += vmlinux.lds.h
> > diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
> > new file mode 100644
> > index 000000000000..4856d50006f2
> > --- /dev/null
> > +++ b/arch/riscv/include/asm/spinlock.h
> > @@ -0,0 +1,39 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +
> > +#ifndef __ASM_RISCV_SPINLOCK_H
> > +#define __ASM_RISCV_SPINLOCK_H
> > +
> > +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
> > +#define _Q_PENDING_LOOPS     (1 << 9)
> > +
> > +#define __no_arch_spinlock_redefine
> > +#include <asm/ticket_spinlock.h>
> > +#include <asm/qspinlock.h>
> > +#include <asm/alternative.h>
> > +
> > +DECLARE_STATIC_KEY_TRUE(qspinlock_key);
> > +
> > +#define SPINLOCK_BASE_DECLARE(op, type, type_lock)                   \
> > +static __always_inline type arch_spin_##op(type_lock lock)           \
> > +{                                                                    \
> > +     if (static_branch_unlikely(&qspinlock_key))                     \
> > +             return queued_spin_##op(lock);                          \
> > +     return ticket_spin_##op(lock);                                  \
> > +}
> > +
> > +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *)
> > +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *)
> > +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *)
> > +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *)
> > +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *)
> > +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
> > +
> > +#else
> > +
> > +#include <asm/ticket_spinlock.h>
> > +
> > +#endif
> > +
> > +#include <asm/qrwlock.h>
> > +
> > +#endif /* __ASM_RISCV_SPINLOCK_H */
> > diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> > index 4f73c0ae44b2..d7c31c9b8ead 100644
> > --- a/arch/riscv/kernel/setup.c
> > +++ b/arch/riscv/kernel/setup.c
> > @@ -244,6 +244,38 @@ static void __init parse_dtb(void)
> >  #endif
> >  }
> >
> > +DEFINE_STATIC_KEY_TRUE(qspinlock_key);
> > +EXPORT_SYMBOL(qspinlock_key);
> > +
> > +static void __init riscv_spinlock_init(void)
> > +{
> > +     char *using_ext;
> > +
> > +     if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
> > +         IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {
> > +             using_ext = "using Zabha";
> > +
> > +             asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0, RISCV_ISA_EXT_ZACAS, 1)
> > +                      : : : : no_zacas);
> > +             asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0, RISCV_ISA_EXT_ZABHA, 1)
> > +                      : : : : qspinlock);
> > +     }
> > +
> > +no_zacas:
> > +     using_ext = "using Ziccrse";
> > +     asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0,
> > +                          RISCV_ISA_EXT_ZICCRSE, 1)
> > +              : : : : qspinlock);
> > +
> > +     static_branch_disable(&qspinlock_key);
> > +     pr_info("Ticket spinlock: enabled\n");
> > +
> > +     return;
> > +
> > +qspinlock:
> > +     pr_info("Queued spinlock %s: enabled\n", using_ext);
> > +}
>
> This function would be much easier to read with
> riscv_has_extension_[un]likely(), or even riscv_isa_extension_available() since
> it only gets called once. Thankfully the concerns about using those inside
> macros don't apply here :)
>

Yeah, way better, thanks!

Alex

> Regards,
> Samuel
>
> > +
> >  extern void __init init_rt_signal_env(void);
> >
> >  void __init setup_arch(char **cmdline_p)
> > @@ -295,6 +327,7 @@ void __init setup_arch(char **cmdline_p)
> >       riscv_set_dma_cache_alignment();
> >
> >       riscv_user_isa_enable();
> > +     riscv_spinlock_init();
> >  }
> >
> >  bool arch_cpu_is_hotpluggable(int cpu)
> > diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> > index 0655aa5b57b2..bf47cca2c375 100644
> > --- a/include/asm-generic/qspinlock.h
> > +++ b/include/asm-generic/qspinlock.h
> > @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
> >  }
> >  #endif
> >
> > +#ifndef __no_arch_spinlock_redefine
> >  /*
> >   * Remapping spinlock architecture specific functions to the corresponding
> >   * queued spinlock functions.
> > @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
> >  #define arch_spin_lock(l)            queued_spin_lock(l)
> >  #define arch_spin_trylock(l)         queued_spin_trylock(l)
> >  #define arch_spin_unlock(l)          queued_spin_unlock(l)
> > +#endif
> >
> >  #endif /* __ASM_GENERIC_QSPINLOCK_H */
> > diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h
> > index cfcff22b37b3..325779970d8a 100644
> > --- a/include/asm-generic/ticket_spinlock.h
> > +++ b/include/asm-generic/ticket_spinlock.h
> > @@ -89,6 +89,7 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
> >       return (s16)((val >> 16) - (val & 0xffff)) > 1;
> >  }
> >
> > +#ifndef __no_arch_spinlock_redefine
> >  /*
> >   * Remapping spinlock architecture specific functions to the corresponding
> >   * ticket spinlock functions.
> > @@ -99,5 +100,6 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
> >  #define arch_spin_lock(l)            ticket_spin_lock(l)
> >  #define arch_spin_trylock(l)         ticket_spin_trylock(l)
> >  #define arch_spin_unlock(l)          ticket_spin_unlock(l)
> > +#endif
> >
> >  #endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */
>

Patch

diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
index 22f2990392ff..cf26042480e2 100644
--- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
@@ -20,7 +20,7 @@ 
     |    openrisc: |  ok  |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: | TODO |
     |          sh: | TODO |
     |       sparc: |  ok  |
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 0bbaec0444d0..5040c7eac70d 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -72,6 +72,7 @@  config RISCV
 	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
 	select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
 	select BUILDTIME_TABLE_SORT if MMU
 	select CLINT_TIMER if RISCV_M_MODE
@@ -482,6 +483,34 @@  config NODES_SHIFT
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system.  Increases memory reserved to accommodate various tables.
 
+choice
+	prompt "RISC-V spinlock type"
+	default RISCV_COMBO_SPINLOCKS
+
+config RISCV_TICKET_SPINLOCKS
+	bool "Using ticket spinlock"
+
+config RISCV_QUEUED_SPINLOCKS
+	bool "Using queued spinlock"
+	depends on SMP && MMU
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  The queued spinlock implementation requires the forward progress
+	  guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
+	  LR/SC with Ziccrse provide such guarantee.
+
+	  Select this if and only if Zabha or Ziccrse is available on your
+	  platform.
+
+config RISCV_COMBO_SPINLOCKS
+	bool "Using combo spinlock"
+	depends on SMP && MMU
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  Embed both queued spinlock and ticket lock so that the spinlock
+	  implementation can be chosen at runtime.
+endchoice
+
 config RISCV_ALTERNATIVE
 	bool
 	depends on !XIP_KERNEL
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 504f8b7e72d4..ad72f2bd4cc9 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -2,10 +2,12 @@ 
 generic-y += early_ioremap.h
 generic-y += flat.h
 generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
 generic-y += parport.h
-generic-y += spinlock.h
 generic-y += spinlock_types.h
+generic-y += ticket_spinlock.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
+generic-y += qspinlock.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
new file mode 100644
index 000000000000..4856d50006f2
--- /dev/null
+++ b/arch/riscv/include/asm/spinlock.h
@@ -0,0 +1,39 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_RISCV_SPINLOCK_H
+#define __ASM_RISCV_SPINLOCK_H
+
+#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
+#define _Q_PENDING_LOOPS	(1 << 9)
+
+#define __no_arch_spinlock_redefine
+#include <asm/ticket_spinlock.h>
+#include <asm/qspinlock.h>
+#include <asm/alternative.h>
+
+DECLARE_STATIC_KEY_TRUE(qspinlock_key);
+
+#define SPINLOCK_BASE_DECLARE(op, type, type_lock)			\
+static __always_inline type arch_spin_##op(type_lock lock)		\
+{									\
+	if (static_branch_unlikely(&qspinlock_key))			\
+		return queued_spin_##op(lock);				\
+	return ticket_spin_##op(lock);					\
+}
+
+SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
+
+#else
+
+#include <asm/ticket_spinlock.h>
+
+#endif
+
+#include <asm/qrwlock.h>
+
+#endif /* __ASM_RISCV_SPINLOCK_H */
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 4f73c0ae44b2..d7c31c9b8ead 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -244,6 +244,38 @@  static void __init parse_dtb(void)
 #endif
 }
 
+DEFINE_STATIC_KEY_TRUE(qspinlock_key);
+EXPORT_SYMBOL(qspinlock_key);
+
+static void __init riscv_spinlock_init(void)
+{
+	char *using_ext;
+
+	if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
+	    IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {
+		using_ext = "using Zabha";
+
+		asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0, RISCV_ISA_EXT_ZACAS, 1)
+			 : : : : no_zacas);
+		asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0, RISCV_ISA_EXT_ZABHA, 1)
+			 : : : : qspinlock);
+	}
+
+no_zacas:
+	using_ext = "using Ziccrse";
+	asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0,
+			     RISCV_ISA_EXT_ZICCRSE, 1)
+		 : : : : qspinlock);
+
+	static_branch_disable(&qspinlock_key);
+	pr_info("Ticket spinlock: enabled\n");
+
+	return;
+
+qspinlock:
+	pr_info("Queued spinlock %s: enabled\n", using_ext);
+}
+
 extern void __init init_rt_signal_env(void);
 
 void __init setup_arch(char **cmdline_p)
@@ -295,6 +327,7 @@  void __init setup_arch(char **cmdline_p)
 	riscv_set_dma_cache_alignment();
 
 	riscv_user_isa_enable();
+	riscv_spinlock_init();
 }
 
 bool arch_cpu_is_hotpluggable(int cpu)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 0655aa5b57b2..bf47cca2c375 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -136,6 +136,7 @@  static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 }
 #endif
 
+#ifndef __no_arch_spinlock_redefine
 /*
  * Remapping spinlock architecture specific functions to the corresponding
  * queued spinlock functions.
@@ -146,5 +147,6 @@  static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 #define arch_spin_lock(l)		queued_spin_lock(l)
 #define arch_spin_trylock(l)		queued_spin_trylock(l)
 #define arch_spin_unlock(l)		queued_spin_unlock(l)
+#endif
 
 #endif /* __ASM_GENERIC_QSPINLOCK_H */
diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h
index cfcff22b37b3..325779970d8a 100644
--- a/include/asm-generic/ticket_spinlock.h
+++ b/include/asm-generic/ticket_spinlock.h
@@ -89,6 +89,7 @@  static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
 	return (s16)((val >> 16) - (val & 0xffff)) > 1;
 }
 
+#ifndef __no_arch_spinlock_redefine
 /*
  * Remapping spinlock architecture specific functions to the corresponding
  * ticket spinlock functions.
@@ -99,5 +100,6 @@  static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
 #define arch_spin_lock(l)		ticket_spin_lock(l)
 #define arch_spin_trylock(l)		ticket_spin_trylock(l)
 #define arch_spin_unlock(l)		ticket_spin_unlock(l)
+#endif
 
 #endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */