
[RFC,3/3] perf: RISC-V: add support for SSE event

Message ID: 20231026143122.279437-4-cleger@rivosinc.com
State: Changes Requested
Series: riscv: add support for SBI Supervisor Software Events

Checks

Context Check Description
conchuod/vmtest-for-next-PR fail PR summary
conchuod/patch-3-test-1 success .github/scripts/patches/build_rv32_defconfig.sh
conchuod/patch-3-test-2 success .github/scripts/patches/build_rv64_clang_allmodconfig.sh
conchuod/patch-3-test-3 success .github/scripts/patches/build_rv64_gcc_allmodconfig.sh
conchuod/patch-3-test-4 fail .github/scripts/patches/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-3-test-5 fail .github/scripts/patches/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-3-test-6 warning .github/scripts/patches/checkpatch.sh
conchuod/patch-3-test-7 success .github/scripts/patches/dtb_warn_rv64.sh
conchuod/patch-3-test-8 success .github/scripts/patches/header_inline.sh
conchuod/patch-3-test-9 success .github/scripts/patches/kdoc.sh
conchuod/patch-3-test-10 success .github/scripts/patches/module_param.sh
conchuod/patch-3-test-11 success .github/scripts/patches/verify_fixes.sh
conchuod/patch-3-test-12 success .github/scripts/patches/verify_signedoff.sh

Commit Message

Clément Léger Oct. 26, 2023, 2:31 p.m. UTC
In order to use SSE within PMU drivers, register an SSE handler for the
local PMU event. There is not much SSE-specific code needed to handle
the event: just reuse the existing overflow IRQ handler and pass the
appropriate pt_regs.

Signed-off-by: Clément Léger <cleger@rivosinc.com>
---
 arch/riscv/include/asm/sbi.h |  2 ++
 drivers/perf/riscv_pmu_sbi.c | 51 +++++++++++++++++++++++++++++-------
 2 files changed, 44 insertions(+), 9 deletions(-)
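
For illustration, a minimal sketch of the registration flow this patch
adds (see pmu_sbi_setup_irqs() in the diff below). The helper name
pmu_sbi_try_sse() is invented here for clarity; sse_event_register()
and sse_event_enable() come from the earlier patches in this series:

static int pmu_sbi_try_sse(struct riscv_pmu *pmu)
{
	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
	struct sse_event *evt;

	/* Ask the SBI to deliver local PMU overflows as an SSE event. */
	evt = sse_event_register(SBI_SSE_EVENT_LOCAL_PMU, 0,
				 pmu_sbi_ovf_sse_handler, hw_events);
	if (IS_ERR(evt))
		return PTR_ERR(evt);	/* caller falls back to the per-cpu irq */

	sse_event_enable(evt);
	return 0;
}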

Comments

Atish Patra Oct. 26, 2023, 7:52 p.m. UTC | #1
On Thu, Oct 26, 2023 at 7:31 AM Clément Léger <cleger@rivosinc.com> wrote:
>
> In order to use SSE within PMU drivers, register an SSE handler for the
> local PMU event. There is not much SSE-specific code needed to handle
> the event: just reuse the existing overflow IRQ handler and pass the
> appropriate pt_regs.
>
> Signed-off-by: Clément Léger <cleger@rivosinc.com>
> ---
>  arch/riscv/include/asm/sbi.h |  2 ++
>  drivers/perf/riscv_pmu_sbi.c | 51 +++++++++++++++++++++++++++++-------
>  2 files changed, 44 insertions(+), 9 deletions(-)
>
> diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
> index 2e99cafe7fed..13b01cd3a814 100644
> --- a/arch/riscv/include/asm/sbi.h
> +++ b/arch/riscv/include/asm/sbi.h
> @@ -121,6 +121,8 @@ enum sbi_ext_pmu_fid {
>         SBI_EXT_PMU_COUNTER_START,
>         SBI_EXT_PMU_COUNTER_STOP,
>         SBI_EXT_PMU_COUNTER_FW_READ,
> +       SBI_EXT_PMU_COUNTER_FW_READ_HI,
> +       SBI_EXT_PMU_COUNTER_IRQ_CLEAR,

FWIW: this is not part of SBI v2.0 or any proposed improvements.
As the SSE spec is still in draft state, we need to evaluate whether
this is required or whether there is a better way to request that
M-mode clear the interrupt pending bit.

With the counter delegation extensions in place, this requires a bit
more thought, as that path won't use SBI PMU at all. In addition,
there may be other use cases where the irq needs to be cleared as the
first thing in the handler. There are a few options we are looking at:

1. Move the irq processing to a workqueue and call SSE complete,
which can clear the irq (rough sketch below).
2. Define a generic irq clear function in the SSE extension itself,
if we have to rely on an SBI call.
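
A rough, untested sketch of option 1 — it assumes a new work_struct
member (ovf_work) in struct cpu_hw_events, initialized with INIT_WORK()
at probe time, and hand-waves how the interrupted pt_regs needed for
perf sampling would be preserved across the deferral:

static void pmu_sbi_ovf_work_fn(struct work_struct *work)
{
	struct cpu_hw_events *cpu_hw_evt =
		container_of(work, struct cpu_hw_events, ovf_work);

	/*
	 * Process the overflow outside the SSE handler. Perf sampling
	 * needs the interrupted pt_regs, which would have to be
	 * captured before deferring; elided in this sketch.
	 */
	pmu_sbi_ovf_handler(cpu_hw_evt, NULL, true);
}

static int pmu_sbi_ovf_sse_handler(uint32_t evt, void *arg,
				   struct pt_regs *regs)
{
	struct cpu_hw_events __percpu *hw_events = arg;
	struct cpu_hw_events *cpu_hw_evt = raw_cpu_ptr(hw_events);

	/*
	 * Queue the heavy lifting and return immediately: returning
	 * lets the SSE framework complete the event, at which point
	 * the SBI implementation can clear the pending irq.
	 */
	schedule_work_on(smp_processor_id(), &cpu_hw_evt->ovf_work);

	return 0;
}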

>  };
>
>  union sbi_pmu_ctr_info {
> diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
> index 96c7f670c8f0..3fca70b13304 100644
> --- a/drivers/perf/riscv_pmu_sbi.c
> +++ b/drivers/perf/riscv_pmu_sbi.c
> @@ -17,6 +17,7 @@
>  #include <linux/irqdomain.h>
>  #include <linux/of_irq.h>
>  #include <linux/of.h>
> +#include <linux/riscv_sse.h>
>  #include <linux/cpu_pm.h>
>  #include <linux/sched/clock.h>
>
> @@ -625,6 +626,12 @@ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
>                   cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
>  }
>
> +static void pmu_sbi_irq_clear(void)
> +{
> +       /* No need to check the error here as we can't do anything about the error */
> +       sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_IRQ_CLEAR, 0, 0, 0, 0, 0, 0);
> +}
> +
>  /*
>   * This function starts all the used counters in two step approach.
>   * Any counter that did not overflow can be start in a single step
> @@ -670,10 +677,10 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
>         }
>  }
>
> -static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
> +static irqreturn_t pmu_sbi_ovf_handler(struct cpu_hw_events *cpu_hw_evt,
> +                                      struct pt_regs *regs, bool from_sse)
>  {
>         struct perf_sample_data data;
> -       struct pt_regs *regs;
>         struct hw_perf_event *hw_evt;
>         union sbi_pmu_ctr_info *info;
>         int lidx, hidx, fidx;
> @@ -681,7 +688,6 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
>         struct perf_event *event;
>         unsigned long overflow;
>         unsigned long overflowed_ctrs = 0;
> -       struct cpu_hw_events *cpu_hw_evt = dev;
>         u64 start_clock = sched_clock();
>
>         if (WARN_ON_ONCE(!cpu_hw_evt))
> @@ -691,7 +697,10 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
>         fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
>         event = cpu_hw_evt->events[fidx];
>         if (!event) {
> -               csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
> +               if (from_sse)
> +                       pmu_sbi_irq_clear();
> +               else
> +                       csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
>                 return IRQ_NONE;
>         }
>
> @@ -703,16 +712,16 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
>
>         /*
>          * Overflow interrupt pending bit should only be cleared after stopping
> -        * all the counters to avoid any race condition.
> +        * all the counters to avoid any race condition. When using SSE,
> +        * interrupt is cleared when stopping counters.
>          */
> -       csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
> +       if (!from_sse)
> +               csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
>
>         /* No overflow bit is set */
>         if (!overflow)
>                 return IRQ_NONE;
>
> -       regs = get_irq_regs();
> -
>         for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
>                 struct perf_event *event = cpu_hw_evt->events[lidx];
>
> @@ -758,6 +767,22 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
>         return IRQ_HANDLED;
>  }
>
> +static irqreturn_t pmu_sbi_ovf_irq_handler(int irq, void *dev)
> +{
> +       return pmu_sbi_ovf_handler(dev, get_irq_regs(), false);
> +}
> +
> +static int pmu_sbi_ovf_sse_handler(uint32_t evt, void *arg,
> +                                  struct pt_regs *regs)
> +{
> +       struct cpu_hw_events __percpu *hw_events = arg;
> +       struct cpu_hw_events *hw_event = raw_cpu_ptr(hw_events);
> +
> +       pmu_sbi_ovf_handler(hw_event, regs, true);
> +
> +       return 0;
> +}
> +
>  static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
>  {
>         struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
> @@ -801,9 +826,17 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
>  static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
>  {
>         int ret;
> +       struct sse_event *evt;
>         struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
>         struct irq_domain *domain = NULL;
>
> +       evt = sse_event_register(SBI_SSE_EVENT_LOCAL_PMU, 0,
> +                                pmu_sbi_ovf_sse_handler, hw_events);
> +       if (!IS_ERR(evt)) {
> +               sse_event_enable(evt);
> +               return 0;
> +       }
> +
>         if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
>                 riscv_pmu_irq_num = RV_IRQ_PMU;
>                 riscv_pmu_use_irq = true;
> @@ -831,7 +864,7 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
>                 return -ENODEV;
>         }
>
> -       ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
> +       ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_irq_handler, "riscv-pmu", hw_events);
>         if (ret) {
>                 pr_err("registering percpu irq failed [%d]\n", ret);
>                 return ret;
> --
> 2.42.0
>


--
Regards,
Atish

Patch

diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 2e99cafe7fed..13b01cd3a814 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -121,6 +121,8 @@  enum sbi_ext_pmu_fid {
 	SBI_EXT_PMU_COUNTER_START,
 	SBI_EXT_PMU_COUNTER_STOP,
 	SBI_EXT_PMU_COUNTER_FW_READ,
+	SBI_EXT_PMU_COUNTER_FW_READ_HI,
+	SBI_EXT_PMU_COUNTER_IRQ_CLEAR,
 };
 
 union sbi_pmu_ctr_info {
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 96c7f670c8f0..3fca70b13304 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -17,6 +17,7 @@ 
 #include <linux/irqdomain.h>
 #include <linux/of_irq.h>
 #include <linux/of.h>
+#include <linux/riscv_sse.h>
 #include <linux/cpu_pm.h>
 #include <linux/sched/clock.h>
 
@@ -625,6 +626,12 @@  static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
 		  cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
 }
 
+static void pmu_sbi_irq_clear(void)
+{
+	/* No need to check the error here as we can't do anything about the error */
+	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_IRQ_CLEAR, 0, 0, 0, 0, 0, 0);
+}
+
 /*
  * This function starts all the used counters in two step approach.
  * Any counter that did not overflow can be start in a single step
@@ -670,10 +677,10 @@  static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
 	}
 }
 
-static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
+static irqreturn_t pmu_sbi_ovf_handler(struct cpu_hw_events *cpu_hw_evt,
+				       struct pt_regs *regs, bool from_sse)
 {
 	struct perf_sample_data data;
-	struct pt_regs *regs;
 	struct hw_perf_event *hw_evt;
 	union sbi_pmu_ctr_info *info;
 	int lidx, hidx, fidx;
@@ -681,7 +688,6 @@  static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 	struct perf_event *event;
 	unsigned long overflow;
 	unsigned long overflowed_ctrs = 0;
-	struct cpu_hw_events *cpu_hw_evt = dev;
 	u64 start_clock = sched_clock();
 
 	if (WARN_ON_ONCE(!cpu_hw_evt))
@@ -691,7 +697,10 @@  static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
 	event = cpu_hw_evt->events[fidx];
 	if (!event) {
-		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+		if (from_sse)
+			pmu_sbi_irq_clear();
+		else
+			csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
 		return IRQ_NONE;
 	}
 
@@ -703,16 +712,16 @@  static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 
 	/*
 	 * Overflow interrupt pending bit should only be cleared after stopping
-	 * all the counters to avoid any race condition.
+	 * all the counters to avoid any race condition. When using SSE,
+	 * interrupt is cleared when stopping counters.
 	 */
-	csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+	if (!from_sse)
+		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
 
 	/* No overflow bit is set */
 	if (!overflow)
 		return IRQ_NONE;
 
-	regs = get_irq_regs();
-
 	for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
 		struct perf_event *event = cpu_hw_evt->events[lidx];
 
@@ -758,6 +767,22 @@  static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t pmu_sbi_ovf_irq_handler(int irq, void *dev)
+{
+	return pmu_sbi_ovf_handler(dev, get_irq_regs(), false);
+}
+
+static int pmu_sbi_ovf_sse_handler(uint32_t evt, void *arg,
+				   struct pt_regs *regs)
+{
+	struct cpu_hw_events __percpu *hw_events = arg;
+	struct cpu_hw_events *hw_event = raw_cpu_ptr(hw_events);
+
+	pmu_sbi_ovf_handler(hw_event, regs, true);
+
+	return 0;
+}
+
 static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
 {
 	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
@@ -801,9 +826,17 @@  static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
 static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
 {
 	int ret;
+	struct sse_event *evt;
 	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
 	struct irq_domain *domain = NULL;
 
+	evt = sse_event_register(SBI_SSE_EVENT_LOCAL_PMU, 0,
+				 pmu_sbi_ovf_sse_handler, hw_events);
+	if (!IS_ERR(evt)) {
+		sse_event_enable(evt);
+		return 0;
+	}
+
 	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
 		riscv_pmu_irq_num = RV_IRQ_PMU;
 		riscv_pmu_use_irq = true;
@@ -831,7 +864,7 @@  static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
 		return -ENODEV;
 	}
 
-	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
+	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_irq_handler, "riscv-pmu", hw_events);
 	if (ret) {
 		pr_err("registering percpu irq failed [%d]\n", ret);
 		return ret;