
[v2,3/5] riscv: Vector checksum header

Message ID 20230905-optimize_checksum-v2-3-ccd658db743b@rivosinc.com (mailing list archive)
State Superseded
Series riscv: Add fine-tuned checksum functions

Checks

Context Check Description
conchuod/cover_letter success Series has a cover letter
conchuod/tree_selection success Guessed tree name to be for-next at HEAD 9e2864968816
conchuod/fixes_present success Fixes tag not required for -next series
conchuod/maintainers_pattern success MAINTAINERS pattern errors before the patch: 2 and now 2
conchuod/verify_signedoff success Signed-off-by tag matches author and committer
conchuod/kdoc success Errors and warnings before: 0 this patch: 0
conchuod/build_rv64_clang_allmodconfig success Errors and warnings before: 1507 this patch: 1507
conchuod/module_param success Was 0 now: 0
conchuod/build_rv64_gcc_allmodconfig fail Failed to build the tree with this patch.
conchuod/build_rv32_defconfig success Build OK
conchuod/dtb_warn_rv64 success Errors and warnings before: 39 this patch: 39
conchuod/header_inline success No static functions without inline keyword in header files
conchuod/checkpatch warning WARNING: Avoid line continuations in quoted strings WARNING: unnecessary whitespace before a quoted newline
conchuod/build_rv64_nommu_k210_defconfig fail Build failed
conchuod/verify_fixes success No Fixes tag
conchuod/build_rv64_nommu_virt_defconfig fail Build failed

Commit Message

Charlie Jenkins Sept. 6, 2023, 4:46 a.m. UTC
Vector code is written in assembly rather than using the GCC vector
intrinsics because the intrinsics did not produce optimal code. Vector
intrinsic types are still used so that the inline assembly can
appropriately select vector registers. However, this code cannot be
merged yet: it is not currently possible to use vector intrinsics in
the kernel because vector support must first be enabled directly from
assembly.

Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
---
 arch/riscv/include/asm/checksum.h | 87 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+)
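
To illustrate the point about intrinsic types, here is a minimal sketch
(editor's illustration, not part of the patch; the function name and
operands are made up) of how declaring an operand with a vector intrinsic
type such as vuint32m1_t gives the "vd" constraints a vector register to
bind while the instructions themselves stay hand-written. It assumes
vector has already been enabled for the kernel (e.g. via riscv_v_enable())
and only sums a single stripe of min(n, VLMAX) words:

static inline unsigned int sum32_one_stripe(const unsigned int *buf,
					    unsigned int n)
{
	/* vuint32m1_t comes from <riscv_vector.h>; it exists purely so
	 * the "vd" output constraints have a vector register to bind. */
	vuint32m1_t vdata, vsum;
	unsigned long vl;
	unsigned int result;

	asm(".option push\n"
	    ".option arch, +v\n"
	    "vsetvli	%[vl], %[n], e32, m1, ta, ma\n"
	    "vle32.v	%[vdata], (%[buf])\n"
	    "vmv.v.i	%[vsum], 0\n"
	    "vredsum.vs	%[vsum], %[vdata], %[vsum]\n"
	    "vmv.x.s	%[result], %[vsum]\n"
	    ".option pop"
	    : [vl] "=&r" (vl), [vdata] "=&vd" (vdata),
	      [vsum] "=&vd" (vsum), [result] "=&r" (result)
	    : [n] "r" (n), [buf] "r" (buf));

	return result;	/* sums only the first min(n, VLMAX) words */
}

Without the intrinsic-typed variables there would be no C object for the
vector operand constraints to refer to, which is why <riscv_vector.h> is
included even though no intrinsic functions are called.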

Comments

Conor Dooley Sept. 7, 2023, 9:47 a.m. UTC | #1
On Tue, Sep 05, 2023 at 09:46:52PM -0700, Charlie Jenkins wrote:
> Vector code is written in assembly rather than using the GCC vector
> intrinsics because the intrinsics did not produce optimal code. Vector
> intrinsic types are still used so that the inline assembly can
> appropriately select vector registers. However, this code cannot be
> merged yet: it is not currently possible to use vector intrinsics in
> the kernel because vector support must first be enabled directly from
> assembly.
> 
> Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> ---
>  arch/riscv/include/asm/checksum.h | 87 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 87 insertions(+)
> 
> diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
> index 3f9d5a202e95..1d6c23cd1221 100644
> --- a/arch/riscv/include/asm/checksum.h
> +++ b/arch/riscv/include/asm/checksum.h
> @@ -10,6 +10,10 @@
>  #include <linux/in6.h>
>  #include <linux/uaccess.h>
>  
> +#ifdef CONFIG_RISCV_ISA_V
> +#include <riscv_vector.h>
> +#endif
> +
>  #ifdef CONFIG_32BIT
>  typedef unsigned int csum_t;
>  #else
> @@ -43,6 +47,89 @@ static inline __sum16 csum_fold(__wsum sum)
>   */
>  static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
>  {
> +#ifdef CONFIG_RISCV_ISA_V
> +	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
> +		/*
> +		 * Vector is likely available when the kernel is compiled with
> +		 * vector support, so nop when vector is available and jump when
> +		 * vector is not available.
> +		 */
> +		asm_volatile_goto(ALTERNATIVE("j %l[no_vector]", "nop", 0,
> +					      RISCV_ISA_EXT_v, 1)
> +				  :
> +				  :
> +				  :
> +				  : no_vector);
> +	} else {
> +		if (!__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_v))
> +			goto no_vector;
> +	}

Silly question maybe, but is this complexity required?
If you were to go and do
	if (!has_vector())
		goto no_vector
is there any meaningful difference in performance?
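
For reference, the simpler guard being suggested would look roughly like
the sketch below (editor's illustration; has_vector() is expected to hide
the ALTERNATIVE-based check when CONFIG_RISCV_ALTERNATIVE is enabled and
fall back to the runtime probe otherwise, which is the distinction the
open-coded block above handles by hand):

#ifdef CONFIG_RISCV_ISA_V
	/* has_vector() (from <asm/vector.h>) replaces both branches of
	 * the IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) block. */
	if (!has_vector())
		goto no_vector;

	/* ... vector implementation ... */

no_vector:
#endif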


> +
> +	vuint64m1_t prev_buffer;
> +	vuint32m1_t curr_buffer;
> +	unsigned int vl;
> +#ifdef CONFIG_32_BIT
> +	csum_t high_result, low_result;
> +
> +	riscv_v_enable();
> +	asm(".option push						\n\
> +	.option arch, +v						\n\
> +	vsetivli x0, 1, e64, ta, ma					\n\
> +	vmv.v.i %[prev_buffer], 0					\n\
> +	1:								\n\
> +	vsetvli %[vl], %[ihl], e32, m1, ta, ma				\n\
> +	vle32.v %[curr_buffer], (%[iph])				\n\
> +	vwredsumu.vs %[prev_buffer], %[curr_buffer], %[prev_buffer]	\n\
> +	sub %[ihl], %[ihl], %[vl]					\n\
> +	slli %[vl], %[vl], 2						\n\

Also, could you please try to align the operands for asm stuff?
It makes quite a difference to readability.

Thanks,
Conor.
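
As an illustration of the alignment being asked for (layout suggestion
only, no functional change, using the operands already in the hunk):

	"vwredsumu.vs	%[prev_buffer], %[curr_buffer], %[prev_buffer]\n"
	"sub		%[ihl], %[ihl], %[vl]\n"
	"slli		%[vl], %[vl], 2\n"
	...
	: [vl]          "=&r"  (vl),
	  [prev_buffer] "=&vd" (prev_buffer),
	  [curr_buffer] "=&vd" (curr_buffer),
	  [high_result] "=&r"  (high_result),
	  [low_result]  "=&r"  (low_result)
	: [iph]         "r"    (iph),
	  [ihl]         "r"    (ihl));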

> +	add %[iph], %[vl], %[iph]					\n\
> +	# If not all of iph could fit into vector reg, do another sum	\n\
> +	bne %[ihl], zero, 1b						\n\
> +	vsetivli x0, 1, e64, m1, ta, ma					\n\
> +	vmv.x.s %[low_result], %[prev_buffer]				\n\
> +	addi %[vl], x0, 32						\n\
> +	vsrl.vx %[prev_buffer], %[prev_buffer], %[vl]			\n\
> +	vmv.x.s %[high_result], %[prev_buffer]				\n\
> +	.option pop"
> +	: [vl] "=&r" (vl), [prev_buffer] "=&vd" (prev_buffer),
> +		[curr_buffer] "=&vd" (curr_buffer),
> +		[high_result] "=&r" (high_result),
> +		[low_result] "=&r" (low_result)
> +	: [iph] "r" (iph), [ihl] "r" (ihl));
> +	riscv_v_disable();
> +
> +	high_result += low_result;
> +	high_result += high_result < low_result;
> +#else // !CONFIG_32_BIT
> +	csum_t result;
> +
> +	riscv_v_enable();
> +	asm(".option push						\n\
> +	.option arch, +v						\n\
> +	vsetivli x0, 1, e64, ta, ma					\n\
> +	vmv.v.i %[prev_buffer], 0					\n\
> +	1:								\n\
> +	# Setup 32-bit sum of iph					\n\
> +	vsetvli %[vl], %[ihl], e32, m1, ta, ma				\n\
> +	vle32.v %[curr_buffer], (%[iph])				\n\
> +	# Sum each 32-bit segment of iph that can fit into a vector reg	\n\
> +	vwredsumu.vs %[prev_buffer], %[curr_buffer], %[prev_buffer]     \n\
> +	subw %[ihl], %[ihl], %[vl]					\n\
> +	slli %[vl], %[vl], 2						\n\
> +	addw %[iph], %[vl], %[iph]					\n\
> +	# If not all of iph could fit into vector reg, do another sum	\n\
> +	bne %[ihl], zero, 1b						\n\
> +	vsetvli x0, x0, e64, m1, ta, ma					\n\
> +	vmv.x.s %[result], %[prev_buffer]				\n\
> +	.option pop"
> +	: [vl] "=&r" (vl), [prev_buffer] "=&vd" (prev_buffer),
> +		[curr_buffer] "=&vd" (curr_buffer), [result] "=&r" (result)
> +	: [iph] "r" (iph), [ihl] "r" (ihl));
> +	riscv_v_disable();
> +#endif // !CONFIG_32_BIT
> +no_vector:
> +#endif // !CONFIG_RISCV_ISA_V
> +
>  	csum_t csum = 0;
>  	int pos = 0;
>  
> 
> -- 
> 2.42.0
>
Conor Dooley Sept. 7, 2023, 9:58 a.m. UTC | #2
On Tue, Sep 05, 2023 at 09:46:52PM -0700, Charlie Jenkins wrote:
> Vector code is written in assembly rather than using the GCC vector
> intrinsics because the intrinsics did not produce optimal code. Vector
> intrinsic types are still used so that the inline assembly can
> appropriately select vector registers. However, this code cannot be
> merged yet: it is not currently possible to use vector intrinsics in
> the kernel because vector support must first be enabled directly from
> assembly.
> 
> Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> ---
>  arch/riscv/include/asm/checksum.h | 87 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 87 insertions(+)
> 
> diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
> index 3f9d5a202e95..1d6c23cd1221 100644
> --- a/arch/riscv/include/asm/checksum.h
> +++ b/arch/riscv/include/asm/checksum.h
> @@ -10,6 +10,10 @@
>  #include <linux/in6.h>
>  #include <linux/uaccess.h>
>  
> +#ifdef CONFIG_RISCV_ISA_V
> +#include <riscv_vector.h>
> +#endif
> +
>  #ifdef CONFIG_32BIT
>  typedef unsigned int csum_t;
>  #else
> @@ -43,6 +47,89 @@ static inline __sum16 csum_fold(__wsum sum)
>   */
>  static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
>  {
> +#ifdef CONFIG_RISCV_ISA_V
> +	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
> +		/*
> +		 * Vector is likely available when the kernel is compiled with
> +		 * vector support, so nop when vector is available and jump when
> +		 * vector is not available.
> +		 */
> +		asm_volatile_goto(ALTERNATIVE("j %l[no_vector]", "nop", 0,
> +					      RISCV_ISA_EXT_v, 1)
> +				  :
> +				  :
> +				  :
> +				  : no_vector);
> +	} else {
> +		if (!__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_v))
> +			goto no_vector;
> +	}
> +
> +	vuint64m1_t prev_buffer;
> +	vuint32m1_t curr_buffer;
> +	unsigned int vl;
> +#ifdef CONFIG_32_BIT
> +	csum_t high_result, low_result;
> +
> +	riscv_v_enable();
> +	asm(".option push						\n\
> +	.option arch, +v						\n\
> +	vsetivli x0, 1, e64, ta, ma					\n\

Also, I don't see that you have addressed previous review comments from
Samuel:
https://lore.kernel.org/linux-riscv/0a8c98bf-46da-e77a-0431-a6c1e224af2e@sifive.com/
Charlie Jenkins Sept. 7, 2023, 5:41 p.m. UTC | #3
On Thu, Sep 07, 2023 at 10:58:33AM +0100, Conor Dooley wrote:
> On Tue, Sep 05, 2023 at 09:46:52PM -0700, Charlie Jenkins wrote:
> > Vector code is written in assembly rather than using the GCC vector
> > intrinsics because the intrinsics did not produce optimal code. Vector
> > intrinsic types are still used so that the inline assembly can
> > appropriately select vector registers. However, this code cannot be
> > merged yet: it is not currently possible to use vector intrinsics in
> > the kernel because vector support must first be enabled directly from
> > assembly.
> > 
> > Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> > ---
> >  arch/riscv/include/asm/checksum.h | 87 +++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 87 insertions(+)
> > 
> > diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
> > index 3f9d5a202e95..1d6c23cd1221 100644
> > --- a/arch/riscv/include/asm/checksum.h
> > +++ b/arch/riscv/include/asm/checksum.h
> > @@ -10,6 +10,10 @@
> >  #include <linux/in6.h>
> >  #include <linux/uaccess.h>
> >  
> > +#ifdef CONFIG_RISCV_ISA_V
> > +#include <riscv_vector.h>
> > +#endif
> > +
> >  #ifdef CONFIG_32BIT
> >  typedef unsigned int csum_t;
> >  #else
> > @@ -43,6 +47,89 @@ static inline __sum16 csum_fold(__wsum sum)
> >   */
> >  static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
> >  {
> > +#ifdef CONFIG_RISCV_ISA_V
> > +	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
> > +		/*
> > +		 * Vector is likely available when the kernel is compiled with
> > +		 * vector support, so nop when vector is available and jump when
> > +		 * vector is not available.
> > +		 */
> > +		asm_volatile_goto(ALTERNATIVE("j %l[no_vector]", "nop", 0,
> > +					      RISCV_ISA_EXT_v, 1)
> > +				  :
> > +				  :
> > +				  :
> > +				  : no_vector);
> > +	} else {
> > +		if (!__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_v))
> > +			goto no_vector;
> > +	}
> > +
> > +	vuint64m1_t prev_buffer;
> > +	vuint32m1_t curr_buffer;
> > +	unsigned int vl;
> > +#ifdef CONFIG_32_BIT
> > +	csum_t high_result, low_result;
> > +
> > +	riscv_v_enable();
> > +	asm(".option push						\n\
> > +	.option arch, +v						\n\
> > +	vsetivli x0, 1, e64, ta, ma					\n\
> 
> Also, I don't see that you have addressed previous review comments from
> Samuel:
> https://lore.kernel.org/linux-riscv/0a8c98bf-46da-e77a-0431-a6c1e224af2e@sifive.com/
I added the check for vector as Samuel suggested, but then I
accidentally used riscv_v_enable() instead of kernel_vector_begin();
I will make that change.

- Charlie
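
For clarity, the change being described would make the vector region look
roughly like this (editor's sketch; kernel_vector_begin()/kernel_vector_end()
are assumed to be the in-kernel helpers that manage the vector context,
rather than toggling the hardware enable bit directly the way
riscv_v_enable()/riscv_v_disable() do):

	if (!has_vector())
		goto no_vector;

	kernel_vector_begin();
	/* ... inline asm vector checksum loop from the patch, unchanged ... */
	kernel_vector_end();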
Charlie Jenkins Sept. 7, 2023, 5:43 p.m. UTC | #4
On Thu, Sep 07, 2023 at 10:47:55AM +0100, Conor Dooley wrote:
> On Tue, Sep 05, 2023 at 09:46:52PM -0700, Charlie Jenkins wrote:
> > Vector code is written in assembly rather than using the GCC vector
> > intrinsics because the intrinsics did not produce optimal code. Vector
> > intrinsic types are still used so that the inline assembly can
> > appropriately select vector registers. However, this code cannot be
> > merged yet: it is not currently possible to use vector intrinsics in
> > the kernel because vector support must first be enabled directly from
> > assembly.
> > 
> > Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> > ---
> >  arch/riscv/include/asm/checksum.h | 87 +++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 87 insertions(+)
> > 
> > diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
> > index 3f9d5a202e95..1d6c23cd1221 100644
> > --- a/arch/riscv/include/asm/checksum.h
> > +++ b/arch/riscv/include/asm/checksum.h
> > @@ -10,6 +10,10 @@
> >  #include <linux/in6.h>
> >  #include <linux/uaccess.h>
> >  
> > +#ifdef CONFIG_RISCV_ISA_V
> > +#include <riscv_vector.h>
> > +#endif
> > +
> >  #ifdef CONFIG_32BIT
> >  typedef unsigned int csum_t;
> >  #else
> > @@ -43,6 +47,89 @@ static inline __sum16 csum_fold(__wsum sum)
> >   */
> >  static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
> >  {
> > +#ifdef CONFIG_RISCV_ISA_V
> > +	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
> > +		/*
> > +		 * Vector is likely available when the kernel is compiled with
> > +		 * vector support, so nop when vector is available and jump when
> > +		 * vector is not available.
> > +		 */
> > +		asm_volatile_goto(ALTERNATIVE("j %l[no_vector]", "nop", 0,
> > +					      RISCV_ISA_EXT_v, 1)
> > +				  :
> > +				  :
> > +				  :
> > +				  : no_vector);
> > +	} else {
> > +		if (!__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_v))
> > +			goto no_vector;
> > +	}
> 
> Silly question maybe, but is this complexity required?
> If you were to go and do
> 	if (!has_vector())
> 		goto no_vector
> is there any meaningful difference in performance?
Yes, I should use that instead.
> 
> 
> > +
> > +	vuint64m1_t prev_buffer;
> > +	vuint32m1_t curr_buffer;
> > +	unsigned int vl;
> > +#ifdef CONFIG_32_BIT
> > +	csum_t high_result, low_result;
> > +
> > +	riscv_v_enable();
> > +	asm(".option push						\n\
> > +	.option arch, +v						\n\
> > +	vsetivli x0, 1, e64, ta, ma					\n\
> > +	vmv.v.i %[prev_buffer], 0					\n\
> > +	1:								\n\
> > +	vsetvli %[vl], %[ihl], e32, m1, ta, ma				\n\
> > +	vle32.v %[curr_buffer], (%[iph])				\n\
> > +	vwredsumu.vs %[prev_buffer], %[curr_buffer], %[prev_buffer]	\n\
> > +	sub %[ihl], %[ihl], %[vl]					\n\
> > +	slli %[vl], %[vl], 2						\n\
> 
> Also, could you please try to align the operands for asm stuff?
> It makes quite a difference to readability.
> 
> Thanks,
> Conor.
> 
Will do.

- Charlie
> > +	add %[iph], %[vl], %[iph]					\n\
> > +	# If not all of iph could fit into vector reg, do another sum	\n\
> > +	bne %[ihl], zero, 1b						\n\
> > +	vsetivli x0, 1, e64, m1, ta, ma					\n\
> > +	vmv.x.s %[low_result], %[prev_buffer]				\n\
> > +	addi %[vl], x0, 32						\n\
> > +	vsrl.vx %[prev_buffer], %[prev_buffer], %[vl]			\n\
> > +	vmv.x.s %[high_result], %[prev_buffer]				\n\
> > +	.option pop"
> > +	: [vl] "=&r" (vl), [prev_buffer] "=&vd" (prev_buffer),
> > +		[curr_buffer] "=&vd" (curr_buffer),
> > +		[high_result] "=&r" (high_result),
> > +		[low_result] "=&r" (low_result)
> > +	: [iph] "r" (iph), [ihl] "r" (ihl));
> > +	riscv_v_disable();
> > +
> > +	high_result += low_result;
> > +	high_result += high_result < low_result;
> > +#else // !CONFIG_32_BIT
> > +	csum_t result;
> > +
> > +	riscv_v_enable();
> > +	asm(".option push						\n\
> > +	.option arch, +v						\n\
> > +	vsetivli x0, 1, e64, ta, ma					\n\
> > +	vmv.v.i %[prev_buffer], 0					\n\
> > +	1:								\n\
> > +	# Setup 32-bit sum of iph					\n\
> > +	vsetvli %[vl], %[ihl], e32, m1, ta, ma				\n\
> > +	vle32.v %[curr_buffer], (%[iph])				\n\
> > +	# Sum each 32-bit segment of iph that can fit into a vector reg	\n\
> > +	vwredsumu.vs %[prev_buffer], %[curr_buffer], %[prev_buffer]     \n\
> > +	subw %[ihl], %[ihl], %[vl]					\n\
> > +	slli %[vl], %[vl], 2						\n\
> > +	addw %[iph], %[vl], %[iph]					\n\
> > +	# If not all of iph could fit into vector reg, do another sum	\n\
> > +	bne %[ihl], zero, 1b						\n\
> > +	vsetvli x0, x0, e64, m1, ta, ma					\n\
> > +	vmv.x.s %[result], %[prev_buffer]				\n\
> > +	.option pop"
> > +	: [vl] "=&r" (vl), [prev_buffer] "=&vd" (prev_buffer),
> > +		[curr_buffer] "=&vd" (curr_buffer), [result] "=&r" (result)
> > +	: [iph] "r" (iph), [ihl] "r" (ihl));
> > +	riscv_v_disable();
> > +#endif // !CONFIG_32_BIT
> > +no_vector:
> > +#endif // !CONFIG_RISCV_ISA_V
> > +
> >  	csum_t csum = 0;
> >  	int pos = 0;
> >  
> > 
> > -- 
> > 2.42.0
> >

Patch

diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
index 3f9d5a202e95..1d6c23cd1221 100644
--- a/arch/riscv/include/asm/checksum.h
+++ b/arch/riscv/include/asm/checksum.h
@@ -10,6 +10,10 @@ 
 #include <linux/in6.h>
 #include <linux/uaccess.h>
 
+#ifdef CONFIG_RISCV_ISA_V
+#include <riscv_vector.h>
+#endif
+
 #ifdef CONFIG_32BIT
 typedef unsigned int csum_t;
 #else
@@ -43,6 +47,89 @@  static inline __sum16 csum_fold(__wsum sum)
  */
 static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
+#ifdef CONFIG_RISCV_ISA_V
+	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+		/*
+		 * Vector is likely available when the kernel is compiled with
+		 * vector support, so nop when vector is available and jump when
+		 * vector is not available.
+		 */
+		asm_volatile_goto(ALTERNATIVE("j %l[no_vector]", "nop", 0,
+					      RISCV_ISA_EXT_v, 1)
+				  :
+				  :
+				  :
+				  : no_vector);
+	} else {
+		if (!__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_v))
+			goto no_vector;
+	}
+
+	vuint64m1_t prev_buffer;
+	vuint32m1_t curr_buffer;
+	unsigned int vl;
+#ifdef CONFIG_32_BIT
+	csum_t high_result, low_result;
+
+	riscv_v_enable();
+	asm(".option push						\n\
+	.option arch, +v						\n\
+	vsetivli x0, 1, e64, ta, ma					\n\
+	vmv.v.i %[prev_buffer], 0					\n\
+	1:								\n\
+	vsetvli %[vl], %[ihl], e32, m1, ta, ma				\n\
+	vle32.v %[curr_buffer], (%[iph])				\n\
+	vwredsumu.vs %[prev_buffer], %[curr_buffer], %[prev_buffer]	\n\
+	sub %[ihl], %[ihl], %[vl]					\n\
+	slli %[vl], %[vl], 2						\n\
+	add %[iph], %[vl], %[iph]					\n\
+	# If not all of iph could fit into vector reg, do another sum	\n\
+	bne %[ihl], zero, 1b						\n\
+	vsetivli x0, 1, e64, m1, ta, ma					\n\
+	vmv.x.s %[low_result], %[prev_buffer]				\n\
+	addi %[vl], x0, 32						\n\
+	vsrl.vx %[prev_buffer], %[prev_buffer], %[vl]			\n\
+	vmv.x.s %[high_result], %[prev_buffer]				\n\
+	.option pop"
+	: [vl] "=&r" (vl), [prev_buffer] "=&vd" (prev_buffer),
+		[curr_buffer] "=&vd" (curr_buffer),
+		[high_result] "=&r" (high_result),
+		[low_result] "=&r" (low_result)
+	: [iph] "r" (iph), [ihl] "r" (ihl));
+	riscv_v_disable();
+
+	high_result += low_result;
+	high_result += high_result < low_result;
+#else // !CONFIG_32_BIT
+	csum_t result;
+
+	riscv_v_enable();
+	asm(".option push						\n\
+	.option arch, +v						\n\
+	vsetivli x0, 1, e64, ta, ma					\n\
+	vmv.v.i %[prev_buffer], 0					\n\
+	1:								\n\
+	# Setup 32-bit sum of iph					\n\
+	vsetvli %[vl], %[ihl], e32, m1, ta, ma				\n\
+	vle32.v %[curr_buffer], (%[iph])				\n\
+	# Sum each 32-bit segment of iph that can fit into a vector reg	\n\
+	vwredsumu.vs %[prev_buffer], %[curr_buffer], %[prev_buffer]     \n\
+	subw %[ihl], %[ihl], %[vl]					\n\
+	slli %[vl], %[vl], 2						\n\
+	addw %[iph], %[vl], %[iph]					\n\
+	# If not all of iph could fit into vector reg, do another sum	\n\
+	bne %[ihl], zero, 1b						\n\
+	vsetvli x0, x0, e64, m1, ta, ma					\n\
+	vmv.x.s %[result], %[prev_buffer]				\n\
+	.option pop"
+	: [vl] "=&r" (vl), [prev_buffer] "=&vd" (prev_buffer),
+		[curr_buffer] "=&vd" (curr_buffer), [result] "=&r" (result)
+	: [iph] "r" (iph), [ihl] "r" (ihl));
+	riscv_v_disable();
+#endif // !CONFIG_32_BIT
+no_vector:
+#endif // !CONFIG_RISCV_ISA_V
+
 	csum_t csum = 0;
 	int pos = 0;
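
To make the algorithm easier to follow, here is a scalar sketch (editor's
illustration, not part of the patch; the helper name is made up) of what
the vector loop computes: a 64-bit sum of the ihl 32-bit words of the IP
header, accumulated by vwredsumu.vs one vector-register-sized stripe at a
time:

static inline unsigned long long ip_header_wide_sum(const void *iph,
						    unsigned int ihl)
{
	const unsigned int *word = iph;
	unsigned long long sum = 0;	/* vwredsumu.vs widens e32 sums to e64 */
	unsigned int i;

	for (i = 0; i < ihl; i++)
		sum += word[i];

	return sum;
}

On the 32-bit path the high and low halves of this 64-bit sum are then
combined with an end-around carry (high_result += low_result;
high_result += high_result < low_result), which matches the ones'
complement addition the Internet checksum requires; the fold down to the
final 16-bit value happens outside the lines shown in this hunk.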