diff mbox series

x86/lib: Simplify code for !CONFIG_DCACHE_WORD_ACCESS in csum-partial_64.c

Message ID 5f848b1cd6f844f6bc66fbec44237e08@AcuMS.aculab.com (mailing list archive)
State Not Applicable
Delegated to: Netdev Maintainers
Headers show
Series x86/lib: Simplify code for !CONFIG_DCACHE_WORD_ACCESS in csum-partial_64.c | expand

Checks

Context Check Description
netdev/tree_selection success Not a local patch

Commit Message

David Laight Jan. 6, 2022, 3:21 p.m. UTC
If load_unaligned_zeropad() can't be used (um builds),
then just add the final bytes together and do a single 'adc'
to add them to the 64-bit sum.

Signed-off-by: David Laight <david.laight@aculab.com>
---

It is a shame that this code is needed at all.
I doubt um would ever fault when just reading the 32-bit value.

 arch/x86/lib/csum-partial_64.c | 33 ++++++++++-----------------------
 1 file changed, 10 insertions(+), 23 deletions(-)
diff mbox series

Patch

diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 061b1ed74d6a..edd3e579c2a7 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -73,41 +73,28 @@  __wsum csum_partial(const void *buff, int len, __wsum sum)
 		buff += 8;
 	}
 	if (len & 7) {
+		unsigned long trail;
 #ifdef CONFIG_DCACHE_WORD_ACCESS
 		unsigned int shift = (8 - (len & 7)) * 8;
-		unsigned long trail;
 
 		trail = (load_unaligned_zeropad(buff) << shift) >> shift;
-
-		asm("addq %[trail],%[res]\n\t"
-		    "adcq $0,%[res]"
-			: [res] "+r" (temp64)
-			: [trail] "r" (trail));
 #else
+		trail = 0;
 		if (len & 4) {
-			asm("addq %[val],%[res]\n\t"
-			    "adcq $0,%[res]"
-				: [res] "+r" (temp64)
-				: [val] "r" ((u64)*(u32 *)buff)
-				: "memory");
+			trail += *(u32 *)buff;
 			buff += 4;
 		}
 		if (len & 2) {
-			asm("addq %[val],%[res]\n\t"
-			    "adcq $0,%[res]"
-				: [res] "+r" (temp64)
-				: [val] "r" ((u64)*(u16 *)buff)
-				: "memory");
+			trail += *(u16 *)buff;
 			buff += 2;
 		}
-		if (len & 1) {
-			asm("addq %[val],%[res]\n\t"
-			    "adcq $0,%[res]"
-				: [res] "+r" (temp64)
-				: [val] "r" ((u64)*(u8 *)buff)
-				: "memory");
-		}
+		if (len & 1)
+			trail += *(u8 *)buff;
 #endif
+		asm("addq %[trail],%[res]\n\t"
+		    "adcq $0,%[res]"
+			: [res] "+r" (temp64)
+			: [trail] "r" (trail));
 	}
 	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
 	return (__force __wsum)result;