@@ -28,7 +28,9 @@ struct riscv_isainfo {
DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
DECLARE_PER_CPU(long, misaligned_access_speed);
+#endif
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
@@ -137,4 +139,8 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
+static __always_inline bool has_fast_misaligned_accesses(void)
+{
+ return static_branch_likely(&fast_misaligned_access_speed_key);
+}
#endif
@@ -318,10 +318,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
* branches. The largest chunk of overlap was delegated into the
* do_csum_common function.
*/
- if (static_branch_likely(&fast_misaligned_access_speed_key))
- return do_csum_no_alignment(buff, len);
-
- if (((unsigned long)buff & OFFSET_MASK) == 0)
+	if (has_fast_misaligned_accesses() || (((unsigned long)buff & OFFSET_MASK) == 0))
return do_csum_no_alignment(buff, len);
return do_csum_with_alignment(buff, len);
Create has_fast_misaligned_accesses() to avoid needing to explicitly check
the fast_misaligned_access_speed_key static key.

Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
---
 arch/riscv/include/asm/cpufeature.h | 6 ++++++
 arch/riscv/lib/csum.c               | 5 +----
 2 files changed, 7 insertions(+), 4 deletions(-)
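
For illustration only (not part of the patch): a minimal sketch of how a
caller might use the new helper in place of an open-coded
static_branch_likely(&fast_misaligned_access_speed_key) check. The function
buffer_needs_alignment_fixup() and its mask parameter are hypothetical names
used here for the example, not kernel APIs.

/*
 * Illustrative sketch only: decide whether a buffer needs the slow,
 * alignment-aware path. Harts with fast misaligned accesses never do.
 */
#include <linux/types.h>
#include <asm/cpufeature.h>

static bool buffer_needs_alignment_fixup(const void *buf, unsigned long mask)
{
	/* Harts with fast misaligned accesses can take any offset. */
	if (has_fast_misaligned_accesses())
		return false;

	/* Otherwise only a naturally aligned buffer avoids the slow path. */
	return ((unsigned long)buf & mask) != 0;
}

Wrapping the static key this way keeps the branch a patched jump via
static_branch_likely() while giving callers a capability-style predicate,
similar in spirit to the riscv_cpu_has_extension_*() helpers in the same
header.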