diff mbox series

[2/6] riscv: request misaligned exception delegation from SBI

Message ID 20250106154847.1100344-3-cleger@rivosinc.com (mailing list archive)
State New
Headers show
Series riscv: add SBI FWFT misaligned exception delegation support | expand

Commit Message

Clément Léger Jan. 6, 2025, 3:48 p.m. UTC
Now that the kernel can handle misaligned accesses in S-mode, request
misaligned access exception delegation from SBI. This uses the FWFT SBI
extension defined in SBI version 3.0.

Signed-off-by: Clément Léger <cleger@rivosinc.com>
---
 arch/riscv/include/asm/cpufeature.h        |  1 +
 arch/riscv/kernel/traps_misaligned.c       | 59 ++++++++++++++++++++++
 arch/riscv/kernel/unaligned_access_speed.c |  2 +
 3 files changed, 62 insertions(+)

Comments

Jesse T Jan. 7, 2025, 3:28 a.m. UTC | #1
On Mon, Jan 6, 2025 at 10:52 AM Clément Léger <cleger@rivosinc.com> wrote:
>
> Now that the kernel can handle misaligned accesses in S-mode, request
> misaligned access exception delegation from SBI. This uses the FWFT SBI
> extension defined in SBI version 3.0.
>
> Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Jesse Taube <mr.bossman075@gmail.com>

> ---
>  arch/riscv/include/asm/cpufeature.h        |  1 +
>  arch/riscv/kernel/traps_misaligned.c       | 59 ++++++++++++++++++++++
>  arch/riscv/kernel/unaligned_access_speed.c |  2 +
>  3 files changed, 62 insertions(+)
>
> diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
> index 4bd054c54c21..cd406fe37df8 100644
> --- a/arch/riscv/include/asm/cpufeature.h
> +++ b/arch/riscv/include/asm/cpufeature.h
> @@ -62,6 +62,7 @@ void __init riscv_user_isa_enable(void);
>         _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
>
>  bool check_unaligned_access_emulated_all_cpus(void);
> +void unaligned_access_init(void);
>  #if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
>  void check_unaligned_access_emulated(struct work_struct *work __always_unused);
>  void unaligned_emulation_finish(void);
> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
> index 7cc108aed74e..4aca600527e9 100644
> --- a/arch/riscv/kernel/traps_misaligned.c
> +++ b/arch/riscv/kernel/traps_misaligned.c
> @@ -16,6 +16,7 @@
>  #include <asm/entry-common.h>
>  #include <asm/hwprobe.h>
>  #include <asm/cpufeature.h>
> +#include <asm/sbi.h>
>  #include <asm/vector.h>
>
>  #define INSN_MATCH_LB                  0x3
> @@ -689,3 +690,61 @@ bool check_unaligned_access_emulated_all_cpus(void)
>         return false;
>  }
>  #endif
> +
> +#ifdef CONFIG_RISCV_SBI
> +
> +struct misaligned_deleg_req {
> +       bool enable;
> +       int error;
> +};
> +
> +static void
> +cpu_unaligned_sbi_request_delegation(void *arg)
> +{
> +       struct misaligned_deleg_req *req = arg;
> +       struct sbiret ret;
> +
> +       ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
> +                       SBI_FWFT_MISALIGNED_EXC_DELEG, req->enable, 0, 0, 0, 0);
> +       if (ret.error)
> +               req->error = 1;
> +}
> +
> +static void unaligned_sbi_request_delegation(void)
> +{
> +       struct misaligned_deleg_req req = {true, 0};
> +
> +       on_each_cpu(cpu_unaligned_sbi_request_delegation, &req, 1);
> +       if (!req.error) {
> +               pr_info("SBI misaligned access exception delegation ok\n");
> +               /*
> +                * Note that we don't have to take any specific action here;
> +                * if the delegation is successful, then
> +                * check_unaligned_access_emulated() will verify that the
> +                * platform indeed traps on misaligned accesses.
> +                */
> +               return;
> +       }
> +
> +       /*
> +        * If the delegation request failed on at least one hart, revert
> +        * misaligned delegation for all harts. If we don't do that, we'll
> +        * panic at misaligned delegation check time (see
> +        * check_unaligned_access_emulated()).
> +        */
> +       req.enable = false;
> +       req.error = 0;
> +       on_each_cpu(cpu_unaligned_sbi_request_delegation, &req, 1);
> +       if (req.error)
> +               panic("Failed to disable misaligned delegation for all CPUs\n");
> +
> +}
> +
> +void unaligned_access_init(void)
> +{
> +       if (sbi_probe_extension(SBI_EXT_FWFT) > 0)
> +               unaligned_sbi_request_delegation();
> +}
> +#else
> +void unaligned_access_init(void) {}
> +#endif
> diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
> index 91f189cf1611..1e3166100837 100644
> --- a/arch/riscv/kernel/unaligned_access_speed.c
> +++ b/arch/riscv/kernel/unaligned_access_speed.c
> @@ -403,6 +403,8 @@ static int check_unaligned_access_all_cpus(void)
>  {
>         bool all_cpus_emulated, all_cpus_vec_unsupported;
>
> +       unaligned_access_init();
> +
>         all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
>         all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
>
> --
> 2.47.1
>
>
> _______________________________________________
> linux-riscv mailing list
> linux-riscv@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-riscv
diff mbox series

Patch

diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 4bd054c54c21..cd406fe37df8 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -62,6 +62,7 @@  void __init riscv_user_isa_enable(void);
 	_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
 
 bool check_unaligned_access_emulated_all_cpus(void);
+void unaligned_access_init(void);
 #if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
 void check_unaligned_access_emulated(struct work_struct *work __always_unused);
 void unaligned_emulation_finish(void);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 7cc108aed74e..4aca600527e9 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -16,6 +16,7 @@ 
 #include <asm/entry-common.h>
 #include <asm/hwprobe.h>
 #include <asm/cpufeature.h>
+#include <asm/sbi.h>
 #include <asm/vector.h>
 
 #define INSN_MATCH_LB			0x3
@@ -689,3 +690,61 @@  bool check_unaligned_access_emulated_all_cpus(void)
 	return false;
 }
 #endif
+
+#ifdef CONFIG_RISCV_SBI
+
+struct misaligned_deleg_req {
+	bool enable;
+	int error;
+};
+
+static void
+cpu_unaligned_sbi_request_delegation(void *arg)
+{
+	struct misaligned_deleg_req *req = arg;
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
+			SBI_FWFT_MISALIGNED_EXC_DELEG, req->enable, 0, 0, 0, 0);
+	if (ret.error)
+		req->error = 1;
+}
+
+static void unaligned_sbi_request_delegation(void)
+{
+	struct misaligned_deleg_req req = {true, 0};
+
+	on_each_cpu(cpu_unaligned_sbi_request_delegation, &req, 1);
+	if (!req.error) {
+		pr_info("SBI misaligned access exception delegation ok\n");
+		/*
+		 * Note that we don't have to take any specific action here;
+		 * if the delegation is successful, then
+		 * check_unaligned_access_emulated() will verify that the
+		 * platform indeed traps on misaligned accesses.
+		 */
+		return;
+	}
+
+	/*
+	 * If the delegation request failed on at least one hart, revert
+	 * misaligned delegation for all harts. If we don't do that, we'll
+	 * panic at misaligned delegation check time (see
+	 * check_unaligned_access_emulated()).
+	 */
+	req.enable = false;
+	req.error = 0;
+	on_each_cpu(cpu_unaligned_sbi_request_delegation, &req, 1);
+	if (req.error)
+		panic("Failed to disable misaligned delegation for all CPUs\n");
+
+}
+
+void unaligned_access_init(void)
+{
+	if (sbi_probe_extension(SBI_EXT_FWFT) > 0)
+		unaligned_sbi_request_delegation();
+}
+#else
+void unaligned_access_init(void) {}
+#endif
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 91f189cf1611..1e3166100837 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -403,6 +403,8 @@  static int check_unaligned_access_all_cpus(void)
 {
 	bool all_cpus_emulated, all_cpus_vec_unsupported;
 
+	unaligned_access_init();
+
 	all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
 	all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();