diff mbox series

[RFC,3/7] lib: add rocksoft model crc64

Message ID 20220124160107.1683901-4-kbusch@kernel.org (mailing list archive)
State New, archived
Headers show
Series 64-bit data integrity field support | expand

Commit Message

Keith Busch Jan. 24, 2022, 4:01 p.m. UTC
The NVM Express specification extended data integrity fields to 64 bits
using the Rocksoft^TM parameters. Add the poly (0xad93d23594c93659) to
the crc64 table generation, and provide a library routine implementing
the algorithm.

Since this model reflects inputs and outputs, a helper table and
function are added to reverse bits of 8 and 64 bit values.

Cc: Coly Li <colyli@suse.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 include/linux/crc64.h |  2 ++
 lib/crc64.c           | 79 +++++++++++++++++++++++++++++++++++++++++++
 lib/gen_crc64table.c  | 33 ++++++++++++------
 3 files changed, 103 insertions(+), 11 deletions(-)

Comments

Eric Biggers Jan. 25, 2022, 5:10 a.m. UTC | #1
On Mon, Jan 24, 2022 at 08:01:03AM -0800, Keith Busch wrote:
> Since this model reflects inputs and outputs, a helper table and
> function are added to reverse bits of 8 and 64 bit values.

That's a silly way to do a bit-reflected CRC.  The proper way to do it is to
reflect the bytes too, so that the bits and bytes are ordered consistently, so
explicitly reflecting the bits isn't needed.  Most CRC-32's are bit-reflected,
and they are usually implemented this way.  E.g., see crc32_le() in lib/crc32.c.

Here's a Python script that shows that the Rocksoft CRC-64 can be computed
without explicitly reversing the bits.  It passes the tests from your patch 4:


# Exponents of the set terms of the Rocksoft CRC-64 generator polynomial.
COEFFS = [63, 61, 59, 58, 56, 55, 52, 49, 48, 47, 46, 44, 41, 37, 36, 34, 32,
          31, 28, 26, 23, 22, 19, 16, 13, 12, 10, 9, 6, 4, 3, 0]
# Bit-reflected (LSB-first) form of the polynomial: exponent e maps to bit 63-e.
POLY = 0
for _coeff in COEFFS:
    POLY |= 1 << (63 - _coeff)


def _crc64_byte(byte):
    # Table entry: CRC of a single byte with a zero starting remainder,
    # consumed LSB first (right-shifting form of the division).
    rem = 0
    for shift in range(8):
        if (rem ^ (byte >> shift)) & 1:
            rem = (rem >> 1) ^ POLY
        else:
            rem >>= 1
    return rem


# One-byte-at-a-time lookup table, indexed by the low byte of the remainder.
table = [_crc64_byte(b) for b in range(256)]


def crc64_rocksoft(data):
    # Rocksoft model: init ~0, reflected input/output, final xor ~0.
    # Because bytes are folded in LSB-first against the reflected poly,
    # no explicit bit reversal is ever needed.
    crc = 0xFFFFFFFFFFFFFFFF
    for b in data:
        crc = table[(crc ^ b) & 0xFF] ^ (crc >> 8)
    return crc ^ 0xFFFFFFFFFFFFFFFF

# Known-answer tests: vectors and expected CRCs taken from the kernel
# patch's self-test (patch 4 of the series).
for data, want in (
    (bytearray([0] * 4096), 0x6482D367EB22B64E),
    (bytearray([255] * 4096), 0xC0DDBA7302ECA3AC),
    (bytearray([i % 256 for i in range(4096)]), 0x3E729F5F6750449C),
    (bytearray([(255 - i) % 256 for i in range(4096)]), 0x9A2DF64B8E9E517E),
):
    assert crc64_rocksoft(data) == want

# Print the table in gen_crc64table.c's output format: two entries per
# line, a single space between them, no trailing whitespace.
print(f'#define CRC64_ROCKSOFT_POLY 0x{POLY:016x}ULL')
print('')
print('static const u64 crc64_rocksoft_tab[] = {')
for i in range(0, 256, 2):
    print('\t', end='')
    for j in range(i, i + 2):
        print(f'0x{table[j]:016x}ULL,', end='')
        # Fix: was `j != 1`, which only suppressed the separator on the
        # very first line's second entry and left a trailing space on
        # every later line. The space belongs only between the pair.
        if j != i + 1:
            print(' ', end='')
    print('')
print('};')
Keith Busch Jan. 25, 2022, 3:39 p.m. UTC | #2
On Mon, Jan 24, 2022 at 09:10:38PM -0800, Eric Biggers wrote:
> On Mon, Jan 24, 2022 at 08:01:03AM -0800, Keith Busch wrote:
> > Since this model reflects inputs and outputs, a helper table and
> > function are added to reverse bits of 8 and 64 bit values.
> 
> That's a silly way to do a bit-reflected CRC.  The proper way to do it is to
> reflect the bytes too, so that the bits and bytes are ordered consistently, so
> explicitly reflecting the bits isn't needed.  Most CRC-32's are bit-reflected,
> and they are usually implemented this way.  E.g., see crc32_le() in lib/crc32.c.
> 
> Here's a Python script that shows that the Rocksoft CRC-64 can be computed
> without explicitly reversing the bits.  It passes the tests from your patch 4:

Thanks for the tip! I'll use that for the next version.
diff mbox series

Patch

diff --git a/include/linux/crc64.h b/include/linux/crc64.h
index c756e65a1b58..9f2f20216503 100644
--- a/include/linux/crc64.h
+++ b/include/linux/crc64.h
@@ -8,4 +8,6 @@ 
 #include <linux/types.h>
 
 u64 __pure crc64_be(u64 crc, const void *p, size_t len);
+u64 __pure crc64_rocksoft(u64 crc, const void *p, size_t len);
+
 #endif /* _LINUX_CRC64_H */
diff --git a/lib/crc64.c b/lib/crc64.c
index 9f852a89ee2a..963283d1e682 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -22,6 +22,13 @@ 
  * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
  * x^7 + x^4 + x + 1
  *
+ * crc64rocksoft[256] table is from the Rocksoft specification polynomial
+ * defined as,
+ *
+ * x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 + x^47 +
+ * x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 + x^26 + x^23 +
+ * x^22 + x^19 + x^16 + x^13 + x^12 + x^10 + x^9 + x^6 + x^4 + x^3 + 1
+ *
  * Copyright 2018 SUSE Linux.
  *   Author: Coly Li <colyli@suse.de>
  */
@@ -55,3 +62,75 @@  u64 __pure crc64_be(u64 crc, const void *p, size_t len)
 	return crc;
 }
 EXPORT_SYMBOL_GPL(crc64_be);
+
+static u64 bit_reverse(u64 val)
+{
+	unsigned long *v, *r;
+	u64 new_val = 0;
+	int i;
+
+	v = (unsigned long *)&val;
+	r = (unsigned long *)&new_val;
+
+	for_each_set_bit(i, v, 64)
+		set_bit(63 - i, r);
+
+	return new_val;
+}
+
+/*
+ * quick lookup table with values reversing the bits of the index
+ */
+static const u8 reverse[256] = {
+	0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0,
+	0x30, 0xB0, 0x70, 0xF0, 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8,
+	0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, 0x04, 0x84, 0x44, 0xC4,
+	0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
+	0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC,
+	0x3C, 0xBC, 0x7C, 0xFC, 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2,
+	0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, 0x0A, 0x8A, 0x4A, 0xCA,
+	0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
+	0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6,
+	0x36, 0xB6, 0x76, 0xF6, 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE,
+	0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, 0x01, 0x81, 0x41, 0xC1,
+	0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
+	0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9,
+	0x39, 0xB9, 0x79, 0xF9, 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5,
+	0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, 0x0D, 0x8D, 0x4D, 0xCD,
+	0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
+	0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3,
+	0x33, 0xB3, 0x73, 0xF3, 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB,
+	0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, 0x07, 0x87, 0x47, 0xC7,
+	0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
+	0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF,
+	0x3F, 0xBF, 0x7F, 0xFF
+};
+
+/**
+ * crc64_rocksoft - Calculate bitwise Rocksoft CRC64
+ * @crc: seed value for computation. (u64)~0 for a new CRC calculation, or the
+ * 	 previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ *
+ * The Rocksoft 64-bit CRC model parameters:
+ * 	Initial value: 0xFFFFFFFFFFFFFFFF
+ * 	Reflected Input: True
+ * 	Reflected Output: True
+ * 	Xor Final: 0xFFFFFFFFFFFFFFFF
+ */
+u64 __pure crc64_rocksoft(u64 crc, const void *p, size_t len)
+{
+	size_t i, t;
+
+	const unsigned char *_p = p;
+
+	for (i = 0; i < len; i++) {
+		u8 tmp = reverse[*_p++];
+		t = ((crc >> 56) ^ tmp) & 0xFF;
+		crc = crc64rocksofttable[t] ^ (crc << 8);
+	}
+
+	return bit_reverse(crc) ^ (u64)~0;
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft);
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
index 094b43aef8db..f1fceddb78ff 100644
--- a/lib/gen_crc64table.c
+++ b/lib/gen_crc64table.c
@@ -17,10 +17,12 @@ 
 #include <stdio.h>
 
 #define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
+#define CRC64_ROCKSOFT_POLY 0xAD93D23594C93659ULL
 
 static uint64_t crc64_table[256] = {0};
+static uint64_t crc64_rocksoft_table[256] = {0};
 
-static void generate_crc64_table(void)
+static void generate_crc64_table(uint64_t table[256], uint64_t poly)
 {
 	uint64_t i, j, c, crc;
 
@@ -30,26 +32,22 @@  static void generate_crc64_table(void)
 
 		for (j = 0; j < 8; j++) {
 			if ((crc ^ c) & 0x8000000000000000ULL)
-				crc = (crc << 1) ^ CRC64_ECMA182_POLY;
+				crc = (crc << 1) ^ poly;
 			else
 				crc <<= 1;
 			c <<= 1;
 		}
 
-		crc64_table[i] = crc;
+		table[i] = crc;
 	}
 }
 
-static void print_crc64_table(void)
+static void output_table(uint64_t table[256])
 {
 	int i;
 
-	printf("/* this file is generated - do not edit */\n\n");
-	printf("#include <linux/types.h>\n");
-	printf("#include <linux/cache.h>\n\n");
-	printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
 	for (i = 0; i < 256; i++) {
-		printf("\t0x%016" PRIx64 "ULL", crc64_table[i]);
+		printf("\t0x%016" PRIx64 "ULL", table[i]);
 		if (i & 0x1)
 			printf(",\n");
 		else
@@ -58,9 +56,22 @@  static void print_crc64_table(void)
 	printf("};\n");
 }
 
+static void print_crc64_tables(void)
+{
+	printf("/* this file is generated - do not edit */\n\n");
+	printf("#include <linux/types.h>\n");
+	printf("#include <linux/cache.h>\n\n");
+	printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
+	output_table(crc64_table);
+
+	printf("\nstatic const u64 ____cacheline_aligned crc64rocksofttable[256] = {\n");
+	output_table(crc64_rocksoft_table);
+}
+
 int main(int argc, char *argv[])
 {
-	generate_crc64_table();
-	print_crc64_table();
+	generate_crc64_table(crc64_table, CRC64_ECMA182_POLY);
+	generate_crc64_table(crc64_rocksoft_table, CRC64_ROCKSOFT_POLY);
+	print_crc64_tables();
 	return 0;
 }