[RFC,v2,3/3] riscv: hw-breakpoints: add more trigger controls

Message ID 20221203215535.208948-4-geomatsi@gmail.com
State RFC
Delegated to: Palmer Dabbelt
Series riscv: support for hardware breakpoints/watchpoints

Checks

Context Check Description
conchuod/tree_selection fail Guessing tree name failed

Commit Message

Sergey Matyukevich Dec. 3, 2022, 9:55 p.m. UTC
From: Sergey Matyukevich <sergey.matyukevich@syntacore.com>

The RISC-V SBI Debug Trigger extension proposal defines multiple
functions to control debug triggers. The install/uninstall pair of
functions was enough to implement the ptrace interface for user-space
debugging. This patch adds kernel wrappers for the start/update/stop
SBI functions. The intended users of these control wrappers are kgdb
and kernel modules that make use of kernel-wide hardware breakpoints
and watchpoints.

Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
---
 arch/riscv/include/asm/hw_breakpoint.h |  15 ++++
 arch/riscv/kernel/hw_breakpoint.c      | 116 ++++++++++++++++++++++++-
 2 files changed, 127 insertions(+), 4 deletions(-)

Comments

Andrew Bresticker Dec. 5, 2022, 6 p.m. UTC | #1
Hi Sergey,

On Sat, Dec 3, 2022 at 4:55 PM Sergey Matyukevich <geomatsi@gmail.com> wrote:
>
> From: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
>
> The RISC-V SBI Debug Trigger extension proposal defines multiple
> functions to control debug triggers. The install/uninstall pair of
> functions was enough to implement the ptrace interface for user-space
> debugging. This patch adds kernel wrappers for the start/update/stop
> SBI functions. The intended users of these control wrappers are kgdb
> and kernel modules that make use of kernel-wide hardware breakpoints
> and watchpoints.
>
> Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>

<snip>

> diff --git a/arch/riscv/include/asm/hw_breakpoint.h b/arch/riscv/include/asm/hw_breakpoint.h
> index 5bb3b55cd464..afc59f8e034e 100644
> --- a/arch/riscv/include/asm/hw_breakpoint.h
> +++ b/arch/riscv/include/asm/hw_breakpoint.h
> @@ -137,6 +137,9 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
>  int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
>                                     unsigned long val, void *data);
>
> +void arch_enable_hw_breakpoint(struct perf_event *bp);
> +void arch_update_hw_breakpoint(struct perf_event *bp);
> +void arch_disable_hw_breakpoint(struct perf_event *bp);
>  int arch_install_hw_breakpoint(struct perf_event *bp);
>  void arch_uninstall_hw_breakpoint(struct perf_event *bp);
>  void hw_breakpoint_pmu_read(struct perf_event *bp);
> @@ -153,5 +156,17 @@ static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
>  {
>  }
>
> +void arch_enable_hw_breakpoint(struct perf_event *bp)
> +{
> +}
> +
> +void arch_update_hw_breakpoint(struct perf_event *bp)
> +{
> +}
> +
> +void arch_disable_hw_breakpoint(struct perf_event *bp)
> +{
> +}

I don't see any references to {enable,update,disable}_hw_breakpoint in
common kernel code, nor do any other architectures define these
functions. Who are the intended users? Do we even need them for kgdb
on RISC-V?

> diff --git a/arch/riscv/kernel/hw_breakpoint.c b/arch/riscv/kernel/hw_breakpoint.c
> index 8eddf512cd03..ca7df02830c2 100644
> --- a/arch/riscv/kernel/hw_breakpoint.c
> +++ b/arch/riscv/kernel/hw_breakpoint.c
> @@ -2,6 +2,7 @@
>
>  #include <linux/hw_breakpoint.h>
>  #include <linux/perf_event.h>
> +#include <linux/spinlock.h>
>  #include <linux/percpu.h>
>  #include <linux/kdebug.h>
>
> @@ -9,6 +10,8 @@
>
>  /* bps/wps currently set on each debug trigger for each cpu */
>  static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
> +static DEFINE_PER_CPU(unsigned long, msg_lock_flags);
> +static DEFINE_PER_CPU(raw_spinlock_t, msg_lock);

I'm not sure I understand the point of this per-CPU spinlock. Just
disable preemption (and interrupts, if necessary).

Also, arch_{install,uninstall}_hw_breakpoint are already expected to
be called from atomic context; is the intention here that they be
callable from outside atomic context?

-Andrew

>
>  static struct sbi_dbtr_data_msg __percpu *sbi_xmit;
>  static struct sbi_dbtr_id_msg __percpu *sbi_recv;
> @@ -318,6 +321,10 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
>         struct perf_event **slot;
>         unsigned long idx;
>         struct sbiret ret;
> +       int err = 0;
> +
> +       raw_spin_lock_irqsave(this_cpu_ptr(&msg_lock),
> +                         *this_cpu_ptr(&msg_lock_flags));
>
>         xmit->tdata1 = cpu_to_lle(info->trig_data1.value);
>         xmit->tdata2 = cpu_to_lle(info->trig_data2);
> @@ -328,25 +335,31 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
>                         0, 0, 0);
>         if (ret.error) {
>                 pr_warn("%s: failed to install trigger\n", __func__);
> -               return -EIO;
> +               err = -EIO;
> +               goto done;
>         }
>
>         idx = lle_to_cpu(recv->idx);
>
>         if (idx >= dbtr_total_num) {
>                 pr_warn("%s: invalid trigger index %lu\n", __func__, idx);
> -               return -EINVAL;
> +               err = -EINVAL;
> +               goto done;
>         }
>
>         slot = this_cpu_ptr(&bp_per_reg[idx]);
>         if (*slot) {
>                 pr_warn("%s: slot %lu is in use\n", __func__, idx);
> -               return -EBUSY;
> +               err = -EBUSY;
> +               goto done;
>         }
>
>         *slot = bp;
>
> -       return 0;
> +done:
> +       raw_spin_unlock_irqrestore(this_cpu_ptr(&msg_lock),
> +                              *this_cpu_ptr(&msg_lock_flags));
> +       return err;
>  }
>
>  /* atomic: counter->ctx->lock is held */
> @@ -375,6 +388,96 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
>                 pr_warn("%s: failed to uninstall trigger %d\n", __func__, i);
>  }
>
> +void arch_enable_hw_breakpoint(struct perf_event *bp)
> +{
> +       struct sbiret ret;
> +       int i;
> +
> +       for (i = 0; i < dbtr_total_num; i++) {
> +               struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
> +
> +               if (*slot == bp)
> +                       break;
> +       }
> +
> +       if (i == dbtr_total_num) {
> +               pr_warn("%s: unknown breakpoint\n", __func__);
> +               return;
> +       }
> +
> +       ret = sbi_ecall(SBI_EXT_DBTR, SBI_EXT_DBTR_TRIGGER_ENABLE,
> +                       i, 1, 0, 0, 0, 0);
> +       if (ret.error) {
> +               pr_warn("%s: failed to enable trigger %d\n", __func__, i);
> +               return;
> +       }
> +}
> +EXPORT_SYMBOL_GPL(arch_enable_hw_breakpoint);
> +
> +void arch_update_hw_breakpoint(struct perf_event *bp)
> +{
> +       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
> +       struct sbi_dbtr_data_msg *xmit = this_cpu_ptr(sbi_xmit);
> +       struct perf_event **slot;
> +       struct sbiret ret;
> +       int i;
> +
> +       for (i = 0; i < dbtr_total_num; i++) {
> +               slot = this_cpu_ptr(&bp_per_reg[i]);
> +
> +               if (*slot == bp)
> +                       break;
> +       }
> +
> +       if (i == dbtr_total_num) {
> +               pr_warn("%s: unknown breakpoint\n", __func__);
> +               return;
> +       }
> +
> +       raw_spin_lock_irqsave(this_cpu_ptr(&msg_lock),
> +                         *this_cpu_ptr(&msg_lock_flags));
> +
> +       xmit->tdata1 = cpu_to_lle(info->trig_data1.value);
> +       xmit->tdata2 = cpu_to_lle(info->trig_data2);
> +       xmit->tdata3 = cpu_to_lle(info->trig_data3);
> +
> +       ret = sbi_ecall(SBI_EXT_DBTR, SBI_EXT_DBTR_TRIGGER_UPDATE,
> +                       i, 1, __pa(xmit) >> 4,
> +                       0, 0, 0);
> +       if (ret.error)
> +               pr_warn("%s: failed to update trigger %d\n", __func__, i);
> +
> +       raw_spin_unlock_irqrestore(this_cpu_ptr(&msg_lock),
> +                              *this_cpu_ptr(&msg_lock_flags));
> +}
> +EXPORT_SYMBOL_GPL(arch_update_hw_breakpoint);
> +
> +void arch_disable_hw_breakpoint(struct perf_event *bp)
> +{
> +       struct sbiret ret;
> +       int i;
> +
> +       for (i = 0; i < dbtr_total_num; i++) {
> +               struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
> +
> +               if (*slot == bp)
> +                       break;
> +       }
> +
> +       if (i == dbtr_total_num) {
> +               pr_warn("%s: unknown breakpoint\n", __func__);
> +               return;
> +       }
> +
> +       ret = sbi_ecall(SBI_EXT_DBTR, SBI_EXT_DBTR_TRIGGER_DISABLE,
> +                       i, 1, 0, 0, 0, 0);
> +       if (ret.error) {
> +               pr_warn("%s: failed to disable trigger %d\n", __func__, i);
> +               return;
> +       }
> +}
> +EXPORT_SYMBOL_GPL(arch_disable_hw_breakpoint);
> +
>  void hw_breakpoint_pmu_read(struct perf_event *bp)
>  {
>  }
> @@ -406,6 +509,8 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
>
>  static int __init arch_hw_breakpoint_init(void)
>  {
> +       unsigned int cpu;
> +
>         sbi_xmit = __alloc_percpu(sizeof(*sbi_xmit), SZ_16);
>         if (!sbi_xmit) {
>                 pr_warn("failed to allocate SBI xmit message buffer\n");
> @@ -418,6 +523,9 @@ static int __init arch_hw_breakpoint_init(void)
>                 return -ENOMEM;
>         }
>
> +       for_each_possible_cpu(cpu)
> +               raw_spin_lock_init(&per_cpu(msg_lock, cpu));
> +
>         if (!dbtr_init)
>                 arch_hw_breakpoint_init_sbi();
>
> --
> 2.38.1
>
Sergey Matyukevich Dec. 5, 2022, 8:25 p.m. UTC | #2
Hi Andrew,


> Hi Sergey,
> 
> On Sat, Dec 3, 2022 at 4:55 PM Sergey Matyukevich <geomatsi@gmail.com> wrote:
> > <snip>
>
> > diff --git a/arch/riscv/include/asm/hw_breakpoint.h b/arch/riscv/include/asm/hw_breakpoint.h
> > index 5bb3b55cd464..afc59f8e034e 100644
> > --- a/arch/riscv/include/asm/hw_breakpoint.h
> > +++ b/arch/riscv/include/asm/hw_breakpoint.h
> > @@ -137,6 +137,9 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
> >  int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
> >                                     unsigned long val, void *data);
> >
> > +void arch_enable_hw_breakpoint(struct perf_event *bp);
> > +void arch_update_hw_breakpoint(struct perf_event *bp);
> > +void arch_disable_hw_breakpoint(struct perf_event *bp);
> >  int arch_install_hw_breakpoint(struct perf_event *bp);
> >  void arch_uninstall_hw_breakpoint(struct perf_event *bp);
> >  void hw_breakpoint_pmu_read(struct perf_event *bp);
> > @@ -153,5 +156,17 @@ static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
> >  {
> >  }
> >
> > +void arch_enable_hw_breakpoint(struct perf_event *bp)
> > +{
> > +}
> > +
> > +void arch_update_hw_breakpoint(struct perf_event *bp)
> > +{
> > +}
> > +
> > +void arch_disable_hw_breakpoint(struct perf_event *bp)
> > +{
> > +}
> 
> I don't see any references to {enable,update,disable}_hw_breakpoint in
> common kernel code, nor do any other architectures define these
> functions. Who are the intended users? Do we even need them for kgdb
> on RISC-V?

The SBI Debug Trigger extension proposal defines more functions than
just the install/uninstall pair required for the ptrace hw-breakpoints
API. This patch is an attempt to expose some of those additional SBI
features to the rest of the kernel.

For instance, I have been using these stop/update/start functions to
manage kernel-wide hw-breakpoints while experimenting with the sample
module from samples/hw_breakpoint. It can be convenient to modify a
breakpoint or watchpoint when it fires, e.g. to single-step before
resuming execution. A full unregister/register cycle is not suitable
for this task, and that is where the disable/update/enable sequence
helps.
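
As a rough illustration (an untested sketch, not part of this series),
a handler in such a module could park, tweak and re-arm the trigger
using the wrappers from this patch; the watched symbol here is an
arbitrary example:

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static struct perf_event * __percpu *wp;

static void wp_handler(struct perf_event *bp,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	/* Park the trigger so it does not re-fire while we poke around. */
	arch_disable_hw_breakpoint(bp);

	pr_info("watchpoint hit at pc 0x%lx\n", instruction_pointer(regs));

	/*
	 * ... adjust the fields behind counter_arch_bp(bp) here if
	 * needed, then push the new configuration and re-arm ...
	 */
	arch_update_hw_breakpoint(bp);
	arch_enable_hw_breakpoint(bp);
}

static int __init wp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&jiffies;	/* arbitrary target */
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
	if (IS_ERR((void __force *)wp))
		return PTR_ERR((void __force *)wp);

	return 0;
}

static void __exit wp_exit(void)
{
	unregister_wide_hw_breakpoint(wp);
}

module_init(wp_init);
module_exit(wp_exit);
MODULE_LICENSE("GPL");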

I haven't yet tried to implement hw-breakpoint support in RISC-V kgdb,
so I don't know for sure if these custom calls are needed there.

> > diff --git a/arch/riscv/kernel/hw_breakpoint.c b/arch/riscv/kernel/hw_breakpoint.c
> > index 8eddf512cd03..ca7df02830c2 100644
> > --- a/arch/riscv/kernel/hw_breakpoint.c
> > +++ b/arch/riscv/kernel/hw_breakpoint.c
> > @@ -2,6 +2,7 @@
> >
> >  #include <linux/hw_breakpoint.h>
> >  #include <linux/perf_event.h>
> > +#include <linux/spinlock.h>
> >  #include <linux/percpu.h>
> >  #include <linux/kdebug.h>
> >
> > @@ -9,6 +10,8 @@
> >
> >  /* bps/wps currently set on each debug trigger for each cpu */
> >  static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
> > +static DEFINE_PER_CPU(unsigned long, msg_lock_flags);
> > +static DEFINE_PER_CPU(raw_spinlock_t, msg_lock);
> 
> I'm not sure I understand the point of this per-CPU spinlock. Just
> disable preemption (and interrupts, if necessary).
> 
> Also, arch_{install,uninstall}_hw_breakpoint are already expected to
> be called from atomic context; is the intention here that they be
> callable from outside atomic context?

These additional locks are not needed for the install/uninstall pair,
for the reason you mentioned: those functions are invoked by the kernel
event core in atomic context with ctx->lock held. The locks are only
there to make sure the new 'arch_update_hw_breakpoint' function can
safely share the per-CPU message buffers with install/uninstall.

Regards,
Sergey
Andrew Bresticker Dec. 5, 2022, 10:04 p.m. UTC | #3
On Mon, Dec 5, 2022 at 3:25 PM Sergey Matyukevich <geomatsi@gmail.com> wrote:
>
> Hi Andrew,
>
>
> > Hi Sergey,
> >
> > <snip>
> >
> > I don't see any references to {enable,update,disable}_hw_breakpoint in
> > common kernel code, nor do any other architectures define these
> > functions. Who are the intended users? Do we even need them for kgdb
> > on RISC-V?
>
> The SBI Debug Trigger extension proposal defines more functions than
> just the install/uninstall pair required for the ptrace hw-breakpoints
> API. This patch is an attempt to expose some of those additional SBI
> features to the rest of the kernel.

Got it. IMHO it's generally best not to add dead code unless a user
for it is imminent. It's harder to review something if you don't know
exactly how it'll be used.

> For instance, I have been using these stop/update/start functions to
> manage kernel-wide hw-breakpoints while experimenting with the sample
> module from samples/hw_breakpoint. It can be convenient to modify a
> breakpoint or watchpoint when it fires, e.g. to single-step before
> resuming execution. A full unregister/register cycle is not suitable
> for this task, and that is where the disable/update/enable sequence
> helps.
>
> I haven't yet tried to implement hw-breakpoint support in RISC-V kgdb,
> so I don't know for sure if these custom calls are needed there.
>
> > > <snip>
> > >
> > >  /* bps/wps currently set on each debug trigger for each cpu */
> > >  static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
> > > +static DEFINE_PER_CPU(unsigned long, msg_lock_flags);
> > > +static DEFINE_PER_CPU(raw_spinlock_t, msg_lock);
> >
> > I'm not sure I understand the point of this per-CPU spinlock. Just
> > disable preemption (and interrupts, if necessary).
> >
> > Also, arch_{install,uninstall}_hw_breakpoint are already expected to
> > be called from atomic context; is the intention here that they be
> > callable from outside atomic context?
>
> These additional locks are not needed for the install/uninstall pair,
> for the reason you mentioned: those functions are invoked by the kernel
> event core in atomic context with ctx->lock held. The locks are only
> there to make sure the new 'arch_update_hw_breakpoint' function can
> safely share the per-CPU message buffers with install/uninstall.

Sure, but you can achieve the same by disabling preemption. No need
for an actual spinlock.
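
Roughly something like this (untested sketch, mirroring the update
path from this patch, just without the lock):

void arch_update_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct sbi_dbtr_data_msg *xmit;
	unsigned long flags;
	struct sbiret ret;
	int i;

	/*
	 * With local interrupts off we also cannot be preempted or
	 * migrated, so the per-CPU trigger slots and message buffer
	 * stay consistent without any lock.
	 */
	local_irq_save(flags);

	for (i = 0; i < dbtr_total_num; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp)
			break;
	}

	if (i == dbtr_total_num) {
		pr_warn("%s: unknown breakpoint\n", __func__);
		goto out;
	}

	xmit = this_cpu_ptr(sbi_xmit);
	xmit->tdata1 = cpu_to_lle(info->trig_data1.value);
	xmit->tdata2 = cpu_to_lle(info->trig_data2);
	xmit->tdata3 = cpu_to_lle(info->trig_data3);

	ret = sbi_ecall(SBI_EXT_DBTR, SBI_EXT_DBTR_TRIGGER_UPDATE,
			i, 1, __pa(xmit) >> 4, 0, 0, 0);
	if (ret.error)
		pr_warn("%s: failed to update trigger %d\n", __func__, i);

out:
	local_irq_restore(flags);
}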

-Andrew

>
> Regards,
> Sergey

Patch

diff --git a/arch/riscv/include/asm/hw_breakpoint.h b/arch/riscv/include/asm/hw_breakpoint.h
index 5bb3b55cd464..afc59f8e034e 100644
--- a/arch/riscv/include/asm/hw_breakpoint.h
+++ b/arch/riscv/include/asm/hw_breakpoint.h
@@ -137,6 +137,9 @@  int hw_breakpoint_arch_parse(struct perf_event *bp,
 int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
 				    unsigned long val, void *data);
 
+void arch_enable_hw_breakpoint(struct perf_event *bp);
+void arch_update_hw_breakpoint(struct perf_event *bp);
+void arch_disable_hw_breakpoint(struct perf_event *bp);
 int arch_install_hw_breakpoint(struct perf_event *bp);
 void arch_uninstall_hw_breakpoint(struct perf_event *bp);
 void hw_breakpoint_pmu_read(struct perf_event *bp);
@@ -153,5 +156,17 @@  static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
 {
 }
 
+void arch_enable_hw_breakpoint(struct perf_event *bp)
+{
+}
+
+void arch_update_hw_breakpoint(struct perf_event *bp)
+{
+}
+
+void arch_disable_hw_breakpoint(struct perf_event *bp)
+{
+}
+
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif /* __RISCV_HW_BREAKPOINT_H */
diff --git a/arch/riscv/kernel/hw_breakpoint.c b/arch/riscv/kernel/hw_breakpoint.c
index 8eddf512cd03..ca7df02830c2 100644
--- a/arch/riscv/kernel/hw_breakpoint.c
+++ b/arch/riscv/kernel/hw_breakpoint.c
@@ -2,6 +2,7 @@ 
 
 #include <linux/hw_breakpoint.h>
 #include <linux/perf_event.h>
+#include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/kdebug.h>
 
@@ -9,6 +10,8 @@ 
 
 /* bps/wps currently set on each debug trigger for each cpu */
 static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
+static DEFINE_PER_CPU(unsigned long, msg_lock_flags);
+static DEFINE_PER_CPU(raw_spinlock_t, msg_lock);
 
 static struct sbi_dbtr_data_msg __percpu *sbi_xmit;
 static struct sbi_dbtr_id_msg __percpu *sbi_recv;
@@ -318,6 +321,10 @@  int arch_install_hw_breakpoint(struct perf_event *bp)
 	struct perf_event **slot;
 	unsigned long idx;
 	struct sbiret ret;
+	int err = 0;
+
+	raw_spin_lock_irqsave(this_cpu_ptr(&msg_lock),
+			  *this_cpu_ptr(&msg_lock_flags));
 
 	xmit->tdata1 = cpu_to_lle(info->trig_data1.value);
 	xmit->tdata2 = cpu_to_lle(info->trig_data2);
@@ -328,25 +335,31 @@  int arch_install_hw_breakpoint(struct perf_event *bp)
 			0, 0, 0);
 	if (ret.error) {
 		pr_warn("%s: failed to install trigger\n", __func__);
-		return -EIO;
+		err = -EIO;
+		goto done;
 	}
 
 	idx = lle_to_cpu(recv->idx);
 
 	if (idx >= dbtr_total_num) {
 		pr_warn("%s: invalid trigger index %lu\n", __func__, idx);
-		return -EINVAL;
+		err = -EINVAL;
+		goto done;
 	}
 
 	slot = this_cpu_ptr(&bp_per_reg[idx]);
 	if (*slot) {
 		pr_warn("%s: slot %lu is in use\n", __func__, idx);
-		return -EBUSY;
+		err = -EBUSY;
+		goto done;
 	}
 
 	*slot = bp;
 
-	return 0;
+done:
+	raw_spin_unlock_irqrestore(this_cpu_ptr(&msg_lock),
+			       *this_cpu_ptr(&msg_lock_flags));
+	return err;
 }
 
 /* atomic: counter->ctx->lock is held */
@@ -375,6 +388,96 @@  void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 		pr_warn("%s: failed to uninstall trigger %d\n", __func__, i);
 }
 
+void arch_enable_hw_breakpoint(struct perf_event *bp)
+{
+	struct sbiret ret;
+	int i;
+
+	for (i = 0; i < dbtr_total_num; i++) {
+		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
+
+		if (*slot == bp)
+			break;
+	}
+
+	if (i == dbtr_total_num) {
+		pr_warn("%s: unknown breakpoint\n", __func__);
+		return;
+	}
+
+	ret = sbi_ecall(SBI_EXT_DBTR, SBI_EXT_DBTR_TRIGGER_ENABLE,
+			i, 1, 0, 0, 0, 0);
+	if (ret.error) {
+		pr_warn("%s: failed to enable trigger %d\n", __func__, i);
+		return;
+	}
+}
+EXPORT_SYMBOL_GPL(arch_enable_hw_breakpoint);
+
+void arch_update_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct sbi_dbtr_data_msg *xmit = this_cpu_ptr(sbi_xmit);
+	struct perf_event **slot;
+	struct sbiret ret;
+	int i;
+
+	for (i = 0; i < dbtr_total_num; i++) {
+		slot = this_cpu_ptr(&bp_per_reg[i]);
+
+		if (*slot == bp)
+			break;
+	}
+
+	if (i == dbtr_total_num) {
+		pr_warn("%s: unknown breakpoint\n", __func__);
+		return;
+	}
+
+	raw_spin_lock_irqsave(this_cpu_ptr(&msg_lock),
+			  *this_cpu_ptr(&msg_lock_flags));
+
+	xmit->tdata1 = cpu_to_lle(info->trig_data1.value);
+	xmit->tdata2 = cpu_to_lle(info->trig_data2);
+	xmit->tdata3 = cpu_to_lle(info->trig_data3);
+
+	ret = sbi_ecall(SBI_EXT_DBTR, SBI_EXT_DBTR_TRIGGER_UPDATE,
+			i, 1, __pa(xmit) >> 4,
+			0, 0, 0);
+	if (ret.error)
+		pr_warn("%s: failed to update trigger %d\n", __func__, i);
+
+	raw_spin_unlock_irqrestore(this_cpu_ptr(&msg_lock),
+			       *this_cpu_ptr(&msg_lock_flags));
+}
+EXPORT_SYMBOL_GPL(arch_update_hw_breakpoint);
+
+void arch_disable_hw_breakpoint(struct perf_event *bp)
+{
+	struct sbiret ret;
+	int i;
+
+	for (i = 0; i < dbtr_total_num; i++) {
+		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
+
+		if (*slot == bp)
+			break;
+	}
+
+	if (i == dbtr_total_num) {
+		pr_warn("%s: unknown breakpoint\n", __func__);
+		return;
+	}
+
+	ret = sbi_ecall(SBI_EXT_DBTR, SBI_EXT_DBTR_TRIGGER_DISABLE,
+			i, 1, 0, 0, 0, 0);
+	if (ret.error) {
+		pr_warn("%s: failed to disable trigger %d\n", __func__, i);
+		return;
+	}
+}
+EXPORT_SYMBOL_GPL(arch_disable_hw_breakpoint);
+
 void hw_breakpoint_pmu_read(struct perf_event *bp)
 {
 }
@@ -406,6 +509,8 @@  void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 
 static int __init arch_hw_breakpoint_init(void)
 {
+	unsigned int cpu;
+
 	sbi_xmit = __alloc_percpu(sizeof(*sbi_xmit), SZ_16);
 	if (!sbi_xmit) {
 		pr_warn("failed to allocate SBI xmit message buffer\n");
@@ -418,6 +523,9 @@  static int __init arch_hw_breakpoint_init(void)
 		return -ENOMEM;
 	}
 
+	for_each_possible_cpu(cpu)
+		raw_spin_lock_init(&per_cpu(msg_lock, cpu));
+
 	if (!dbtr_init)
 		arch_hw_breakpoint_init_sbi();