diff mbox series

RISC-V: insn: Use a raw spinlock to protect TEXT_POKE*

Message ID 20210429061713.783628-1-palmer@dabbelt.com (mailing list archive)
State New, archived
Headers show
Series RISC-V: insn: Use a raw spinlock to protect TEXT_POKE* | expand

Commit Message

Palmer Dabbelt April 29, 2021, 6:17 a.m. UTC
From: Palmer Dabbelt <palmerdabbelt@google.com>

We currently use text_mutex to protect the fixmap sections from
concurrent callers.  This is convenient for kprobes as the generic code
already holds text_mutex, but ftrace doesn't which triggers a lockdep
assertion.  We could take text_mutex for ftrace, but the jump label
implementation (which is currently taking text_mutex) isn't explicitly
listed as being sleepable and it's called from enough places it seems
safer to just avoid sleeping.

arm64 and parisc, the other two TEXT_POKE-style patching
implementations, already use raw spinlocks.  abffa6f3b157 ("arm64:
convert patch_lock to raw lock") lays out the case for a raw spinlock as
opposed to a regular spinlock, and while I don't know of anyone using rt
on RISC-V I'm sure it'll eventually show up and I don't see any reason
to wait.

Fixes: ebc00dde8a97 ("riscv: Add jump-label implementation")
Reported-by: Changbin Du <changbin.du@gmail.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
---
 arch/riscv/include/asm/fixmap.h |  3 +++
 arch/riscv/kernel/jump_label.c  |  2 --
 arch/riscv/kernel/patch.c       | 13 +++++++++----
 3 files changed, 12 insertions(+), 6 deletions(-)

Comments

Anup Patel April 29, 2021, 7:55 a.m. UTC | #1
On Thu, Apr 29, 2021 at 11:50 AM Palmer Dabbelt <palmer@dabbelt.com> wrote:
>
> From: Palmer Dabbelt <palmerdabbelt@google.com>
>
> We currently use text_mutex to protect the fixmap sections from
> concurrent callers.  This is convienent for kprobes as the generic code
> already holds text_mutex, but ftrace doesn't which triggers a lockdep
> assertion.  We could take text_mutex for ftrace, but the jump label
> implementation (which is currently taking text_mutex) isn't explicitly
> listed as being sleepable and it's called from enough places it seems
> safer to just avoid sleeping.
>
> arm64 and parisc, the other two TEXT_POKE-style patching
> implemnetations, already use raw spinlocks.  abffa6f3b157 ("arm64:
> convert patch_lock to raw lock") lays out the case for a raw spinlock as
> opposed to a regular spinlock, and while I don't know of anyone using rt
> on RISC-V I'm sure it'll eventually show up and I don't see any reason
> to wait.
>
> Fixes: ebc00dde8a97 ("riscv: Add jump-label implementation")
> Reported-by: Changbin Du <changbin.du@gmail.com>
> Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>

Looks good to me.

Reviewed-by: Anup Patel <anup@brainfault.org>

Regards,
Anup

> ---
>  arch/riscv/include/asm/fixmap.h |  3 +++
>  arch/riscv/kernel/jump_label.c  |  2 --
>  arch/riscv/kernel/patch.c       | 13 +++++++++----
>  3 files changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
> index 54cbf07fb4e9..d1c0a1f123cf 100644
> --- a/arch/riscv/include/asm/fixmap.h
> +++ b/arch/riscv/include/asm/fixmap.h
> @@ -24,8 +24,11 @@ enum fixed_addresses {
>         FIX_HOLE,
>         FIX_PTE,
>         FIX_PMD,
> +
> +       /* Only used in kernel/insn.c */
>         FIX_TEXT_POKE1,
>         FIX_TEXT_POKE0,
> +
>         FIX_EARLYCON_MEM_BASE,
>
>         __end_of_permanent_fixed_addresses,
> diff --git a/arch/riscv/kernel/jump_label.c b/arch/riscv/kernel/jump_label.c
> index 20e09056d141..45bb32f91b5c 100644
> --- a/arch/riscv/kernel/jump_label.c
> +++ b/arch/riscv/kernel/jump_label.c
> @@ -35,9 +35,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
>                 insn = RISCV_INSN_NOP;
>         }
>
> -       mutex_lock(&text_mutex);
>         patch_text_nosync(addr, &insn, sizeof(insn));
> -       mutex_unlock(&text_mutex);
>  }
>
>  void arch_jump_label_transform_static(struct jump_entry *entry,
> diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
> index 0b552873a577..dfa7ee8eb63f 100644
> --- a/arch/riscv/kernel/patch.c
> +++ b/arch/riscv/kernel/patch.c
> @@ -19,6 +19,8 @@ struct patch_insn {
>         atomic_t cpu_count;
>  };
>
> +static DEFINE_RAW_SPINLOCK(patch_lock);
> +
>  #ifdef CONFIG_MMU
>  /*
>   * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
> @@ -54,13 +56,14 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
>         void *waddr = addr;
>         bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
>         int ret;
> +       unsigned long flags = 0;
>
>         /*
> -        * Before reaching here, it was expected to lock the text_mutex
> -        * already, so we don't need to give another lock here and could
> -        * ensure that it was safe between each cores.
> +        * FIX_TEXT_POKE{0,1} are only used for text patching, but we must
> +        * ensure that concurrent callers do not re-map these before we're done
> +        * with them.
>          */
> -       lockdep_assert_held(&text_mutex);
> +       raw_spin_lock_irqsave(&patch_lock, flags);
>
>         if (across_pages)
>                 patch_map(addr + len, FIX_TEXT_POKE1);
> @@ -74,6 +77,8 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
>         if (across_pages)
>                 patch_unmap(FIX_TEXT_POKE1);
>
> +       raw_spin_unlock_irqrestore(&patch_lock, flags);
> +
>         return ret;
>  }
>  NOKPROBE_SYMBOL(patch_insn_write);
> --
> 2.31.1.498.g6c1eba8ee3d-goog
>
>
> _______________________________________________
> linux-riscv mailing list
> linux-riscv@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-riscv
Steven Rostedt April 29, 2021, 4:30 p.m. UTC | #2
On Wed, 28 Apr 2021 23:17:13 -0700
Palmer Dabbelt <palmer@dabbelt.com> wrote:

> From: Palmer Dabbelt <palmerdabbelt@google.com>
> 
> We currently use text_mutex to protect the fixmap sections from
> concurrent callers.  This is convienent for kprobes as the generic code
> already holds text_mutex, but ftrace doesn't which triggers a lockdep
> assertion.  We could take text_mutex for ftrace, but the jump label
> implementation (which is currently taking text_mutex) isn't explicitly
> listed as being sleepable and it's called from enough places it seems
> safer to just avoid sleeping.
> 
> arm64 and parisc, the other two TEXT_POKE-style patching
> implemnetations, already use raw spinlocks.  abffa6f3b157 ("arm64:
> convert patch_lock to raw lock") lays out the case for a raw spinlock as
> opposed to a regular spinlock, and while I don't know of anyone using rt
> on RISC-V I'm sure it'll eventually show up and I don't see any reason
> to wait.

On x86 we use text_mutex for jump label and ftrace. I don't understand the
issue here. The arm64 update was already using spin locks in the
insn_write() function itself. riscv just makes sure that text_mutex is held.

It also looks like ftrace on riscv should also have text_mutex held
whenever it modifies the code. Because I see this in
arch/riscv/kernel/ftrace.c:


int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
        mutex_lock(&text_mutex);
        return 0;
}

int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
        mutex_unlock(&text_mutex);
        return 0;
}

Which should be getting called before and after respectively from when
ftrace does its updates.

Can you show me the back trace of that lockdep splat?

-- Steve
Changbin Du April 29, 2021, 9:54 p.m. UTC | #3
On Thu, Apr 29, 2021 at 12:30:07PM -0400, Steven Rostedt wrote:
> On Wed, 28 Apr 2021 23:17:13 -0700
> Palmer Dabbelt <palmer@dabbelt.com> wrote:
> 
> > From: Palmer Dabbelt <palmerdabbelt@google.com>
> > 
> > We currently use text_mutex to protect the fixmap sections from
> > concurrent callers.  This is convienent for kprobes as the generic code
> > already holds text_mutex, but ftrace doesn't which triggers a lockdep
> > assertion.  We could take text_mutex for ftrace, but the jump label
> > implementation (which is currently taking text_mutex) isn't explicitly
> > listed as being sleepable and it's called from enough places it seems
> > safer to just avoid sleeping.
> > 
> > arm64 and parisc, the other two TEXT_POKE-style patching
> > implemnetations, already use raw spinlocks.  abffa6f3b157 ("arm64:
> > convert patch_lock to raw lock") lays out the case for a raw spinlock as
> > opposed to a regular spinlock, and while I don't know of anyone using rt
> > on RISC-V I'm sure it'll eventually show up and I don't see any reason
> > to wait.
> 
> On x86 we use text_mutex for jump label and ftrace. I don't understand the
> issue here. The arm64 update was already using spin locks in the
> insn_write() function itself. riscv just makes sure that text_mutex is held.
> 
> It also looks like ftrace on riscv should also have text_mutex held
> whenever it modifies the code. Because I see this in
> arch/riscv/kernel/ftrace.c:
> 
> 
> int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
> {
>         mutex_lock(&text_mutex);
>         return 0;
> }
> 
> int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
> {
>         mutex_unlock(&text_mutex);
>         return 0;
> }
> 
> Which should be getting called before and after respectively from when
> ftrace does its updates.
> 
> Can you show me the back trace of that lockdep splat?
> 
The problem is that lockdep cannot handle locks across tasks since we use
stopmachine to patch code for risc-v. So there's a false positive report.
See previous discussion here:
https://lkml.org/lkml/2021/4/29/63

> -- Steve
Steven Rostedt April 30, 2021, 2:47 a.m. UTC | #4
On Fri, 30 Apr 2021 05:54:51 +0800
Changbin Du <changbin.du@gmail.com> wrote:

> The problem is that lockdep cannot handle locks across tasks since we use
> stopmachine to patch code for risc-v. So there's a false positive report.
> See privious disscussion here:

> https://lkml.org/lkml/2021/4/29/63

Please use lore.kernel.org, lkml.org is highly unreliable, and is
considered deprecated for use of referencing linux kernel archives.

Would the following patch work?

(note, I did not even compile test it)

-- Steve

diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 845002cc2e57..19acbb4aaeff 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -25,6 +25,8 @@ struct dyn_arch_ftrace {
 };
 #endif
 
+extern int running_ftrace;
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 /*
  * A general call in RISC-V is a pair of insts:
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 7f1e5203de88..834ab4fad637 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -11,15 +11,19 @@
 #include <asm/cacheflush.h>
 #include <asm/patch.h>
 
+int running_ftrace;
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
 {
 	mutex_lock(&text_mutex);
+	running_ftrace = 1;
 	return 0;
 }
 
 int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
 {
+	running_ftrace = 0;
 	mutex_unlock(&text_mutex);
 	return 0;
 }
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 0b552873a577..4cd1c79a9689 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -12,6 +12,7 @@
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/patch.h>
+#include <asm/ftrace.h>
 
 struct patch_insn {
 	void *addr;
@@ -59,8 +60,13 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
 	 * Before reaching here, it was expected to lock the text_mutex
 	 * already, so we don't need to give another lock here and could
 	 * ensure that it was safe between each cores.
+	 *
+	 * ftrace uses stop machine, and even though the text_mutex is
+	 * held, the stop machine task that calls this function will not
+	 * be the owner.
 	 */
-	lockdep_assert_held(&text_mutex);
+	if (!running_ftrace)
+		lockdep_assert_held(&text_mutex);
 
 	if (across_pages)
 		patch_map(addr + len, FIX_TEXT_POKE1);
Anup Patel April 30, 2021, 4:06 a.m. UTC | #5
> -----Original Message-----
> From: Steven Rostedt <rostedt@goodmis.org>
> Sent: 30 April 2021 08:18
> To: Changbin Du <changbin.du@gmail.com>
> Cc: Palmer Dabbelt <palmer@dabbelt.com>; linux-riscv@lists.infradead.org;
> Paul Walmsley <paul.walmsley@sifive.com>; aou@eecs.berkeley.edu;
> peterz@infradead.org; jpoimboe@redhat.com; jbaron@akamai.com;
> ardb@kernel.org; Atish Patra <Atish.Patra@wdc.com>; Anup Patel
> <Anup.Patel@wdc.com>; akpm@linux-foundation.org; rppt@kernel.org;
> mhiramat@kernel.org; zong.li@sifive.com; guoren@linux.alibaba.com;
> wangkefeng.wang@huawei.com; 0x7f454c46@gmail.com;
> chenhuang5@huawei.com; linux-kernel@vger.kernel.org; kernel-
> team@android.com; Palmer Dabbelt <palmerdabbelt@google.com>
> Subject: Re: [PATCH] RISC-V: insn: Use a raw spinlock to protect TEXT_POKE*
> 
> On Fri, 30 Apr 2021 05:54:51 +0800
> Changbin Du <changbin.du@gmail.com> wrote:
> 
> > The problem is that lockdep cannot handle locks across tasks since we
> > use stopmachine to patch code for risc-v. So there's a false positive report.
> > See privious disscussion here
> 
> > https://lkml.org/lkml/2021/4/29/63
> 
> Please use lore.kernel.org, lkml.org is highly unreliable, and is considered
> deprecated for use of referencing linux kernel archives.
> 
> Would the following patch work?

This patch only takes care of ftrace path.

The RISC-V instruction patching is used by RISC-V jump label implementation
as well and will called from various critical parts of core kernel.

The RAW spinlock approach allows same instruction patching to be used
for kprobes, ftrace, and jump label.

Regards,
Anup

> 
> (note, I did not even compile test it)
> 
> -- Steve
> 
> diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
> index 845002cc2e57..19acbb4aaeff 100644
> --- a/arch/riscv/include/asm/ftrace.h
> +++ b/arch/riscv/include/asm/ftrace.h
> @@ -25,6 +25,8 @@ struct dyn_arch_ftrace {  };  #endif
> 
> +extern int running_ftrace;
> +
>  #ifdef CONFIG_DYNAMIC_FTRACE
>  /*
>   * A general call in RISC-V is a pair of insts:
> diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index
> 7f1e5203de88..834ab4fad637 100644
> --- a/arch/riscv/kernel/ftrace.c
> +++ b/arch/riscv/kernel/ftrace.c
> @@ -11,15 +11,19 @@
>  #include <asm/cacheflush.h>
>  #include <asm/patch.h>
> 
> +int running_ftrace;
> +
>  #ifdef CONFIG_DYNAMIC_FTRACE
>  int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)  {
>  	mutex_lock(&text_mutex);
> +	running_ftrace = 1;
>  	return 0;
>  }
> 
>  int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
> {
> +	running_ftrace = 0;
>  	mutex_unlock(&text_mutex);
>  	return 0;
>  }
> diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index
> 0b552873a577..4cd1c79a9689 100644
> --- a/arch/riscv/kernel/patch.c
> +++ b/arch/riscv/kernel/patch.c
> @@ -12,6 +12,7 @@
>  #include <asm/cacheflush.h>
>  #include <asm/fixmap.h>
>  #include <asm/patch.h>
> +#include <asm/ftrace.h>
> 
>  struct patch_insn {
>  	void *addr;
> @@ -59,8 +60,13 @@ static int patch_insn_write(void *addr, const void
> *insn, size_t len)
>  	 * Before reaching here, it was expected to lock the text_mutex
>  	 * already, so we don't need to give another lock here and could
>  	 * ensure that it was safe between each cores.
> +	 *
> +	 * ftrace uses stop machine, and even though the text_mutex is
> +	 * held, the stop machine task that calls this function will not
> +	 * be the owner.
>  	 */
> -	lockdep_assert_held(&text_mutex);
> +	if (!running_ftrace)
> +		lockdep_assert_held(&text_mutex);
> 
>  	if (across_pages)
>  		patch_map(addr + len, FIX_TEXT_POKE1);
Steven Rostedt April 30, 2021, 11:34 a.m. UTC | #6
On Fri, 30 Apr 2021 04:06:35 +0000
Anup Patel <Anup.Patel@wdc.com> wrote:

> This patch only takes care of ftrace path.
> 
> The RISC-V instruction patching is used by RISC-V jump label implementation
> as well and will called from various critical parts of core kernel.
> 
> The RAW spinlock approach allows same instruction patching to be used
> for kprobes, ftrace, and jump label.

So what path hits this outside of stop machine?

-- Steve
Palmer Dabbelt April 30, 2021, 7:44 p.m. UTC | #7
On Fri, 30 Apr 2021 04:34:31 PDT (-0700), rostedt@goodmis.org wrote:
> On Fri, 30 Apr 2021 04:06:35 +0000
> Anup Patel <Anup.Patel@wdc.com> wrote:
>
>> This patch only takes care of ftrace path.
>>
>> The RISC-V instruction patching is used by RISC-V jump label implementation
>> as well and will called from various critical parts of core kernel.
>>
>> The RAW spinlock approach allows same instruction patching to be used
>> for kprobes, ftrace, and jump label.
>
> So what path hits this outside of stop machine?

I didn't actually dig through all the usages of jump_label, I just saw a 
handful in places where it's generally not sane to assume that sleeping 
is safe -- for example, throughout kernel/sched.  If you think it's OK to 
rely on users of the static branch stuff (IIUC the only jump_label user 
in the kernel?) to know that it can sleep then I'm fine keeping the 
text_mutex call in jump_label and adding one to ftrace (I'm fine with 
something generic, but it's simple to do in arch/riscv).

IMO if the static branch stuff can be expected to sleep it'd be good to 
call that out in the documentation, and I'd like to audit the uses 
before committing to that.  I'm happy to do that, we can just take the 
lock in arch/riscv's ftrace code for now to get around the lockdep 
assertion failure -- IIUC that's indicating a real bug, as nothing in 
ftrace avoids concurrency with jump_label and kprobes.
Palmer Dabbelt May 6, 2021, 7:11 a.m. UTC | #8
On Fri, 30 Apr 2021 12:44:37 PDT (-0700), Palmer Dabbelt wrote:
> On Fri, 30 Apr 2021 04:34:31 PDT (-0700), rostedt@goodmis.org wrote:
>> On Fri, 30 Apr 2021 04:06:35 +0000
>> Anup Patel <Anup.Patel@wdc.com> wrote:
>>
>>> This patch only takes care of ftrace path.
>>>
>>> The RISC-V instruction patching is used by RISC-V jump label implementation
>>> as well and will called from various critical parts of core kernel.
>>>
>>> The RAW spinlock approach allows same instruction patching to be used
>>> for kprobes, ftrace, and jump label.
>>
>> So what path hits this outside of stop machine?
>
> I didn't actually dig through all the usages of jump_label, I just saw a
> handful in places where it's generally not sane to assume that sleeping
> is safe -- for example, thoughout kernel/sched.  If you think it's OK to
> rely on users of the static branch stuff (IIUC the only jump_label user
> in the kernel?) to know that it can sleep then I'm fine keeping the
> text_mutex call in jump_label and adding one to ftrace (I'm fine with
> something generic, but it's simple to do in arch/riscv).
>
> IMO if the static branch stuff can be expected to sleep it'd be good to
> call that out in the documentation, and I'd like to audit the uses
> before committing to that.  I'm happy to do that, we can just take the
> lock in arch/riscv's frace code for now to get around the lockdep
> assertion failure -- IIUC that's indicating a real bug, as nothing in
> ftrace avoids concurrency with jump_label and kprobes.

Turns out I'd actually opened the wrong thread and was looking at a 
stack trace from a bug reported a year ago, which is why nothing I was 
saying here makes any sense.  That bug has already been fixed, I have a 
proper fix for this one.  It turns out to be almost exactly the same as 
something Steven suggested in this thread.
diff mbox series

Patch

diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 54cbf07fb4e9..d1c0a1f123cf 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -24,8 +24,11 @@  enum fixed_addresses {
 	FIX_HOLE,
 	FIX_PTE,
 	FIX_PMD,
+
+	/* Only used in kernel/insn.c */
 	FIX_TEXT_POKE1,
 	FIX_TEXT_POKE0,
+
 	FIX_EARLYCON_MEM_BASE,
 
 	__end_of_permanent_fixed_addresses,
diff --git a/arch/riscv/kernel/jump_label.c b/arch/riscv/kernel/jump_label.c
index 20e09056d141..45bb32f91b5c 100644
--- a/arch/riscv/kernel/jump_label.c
+++ b/arch/riscv/kernel/jump_label.c
@@ -35,9 +35,7 @@  void arch_jump_label_transform(struct jump_entry *entry,
 		insn = RISCV_INSN_NOP;
 	}
 
-	mutex_lock(&text_mutex);
 	patch_text_nosync(addr, &insn, sizeof(insn));
-	mutex_unlock(&text_mutex);
 }
 
 void arch_jump_label_transform_static(struct jump_entry *entry,
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 0b552873a577..dfa7ee8eb63f 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -19,6 +19,8 @@  struct patch_insn {
 	atomic_t cpu_count;
 };
 
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
 #ifdef CONFIG_MMU
 /*
  * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
@@ -54,13 +56,14 @@  static int patch_insn_write(void *addr, const void *insn, size_t len)
 	void *waddr = addr;
 	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
 	int ret;
+	unsigned long flags = 0;
 
 	/*
-	 * Before reaching here, it was expected to lock the text_mutex
-	 * already, so we don't need to give another lock here and could
-	 * ensure that it was safe between each cores.
+	 * FIX_TEXT_POKE{0,1} are only used for text patching, but we must
+	 * ensure that concurrent callers do not re-map these before we're done
+	 * with them.
 	 */
-	lockdep_assert_held(&text_mutex);
+	raw_spin_lock_irqsave(&patch_lock, flags);
 
 	if (across_pages)
 		patch_map(addr + len, FIX_TEXT_POKE1);
@@ -74,6 +77,8 @@  static int patch_insn_write(void *addr, const void *insn, size_t len)
 	if (across_pages)
 		patch_unmap(FIX_TEXT_POKE1);
 
+	raw_spin_unlock_irqrestore(&patch_lock, flags);
+
 	return ret;
 }
 NOKPROBE_SYMBOL(patch_insn_write);