/*
 * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
 * cleaned up code to current version of sparse and added the slicing-by-8
 * algorithm to the closely similar existing slicing-by-4 algorithm.
 *
 * Oct 15, 2000 Matt Domsch <[email protected]>
 * Nicer crc32 functions/docs submitted by [email protected]. Thanks!
 * Code was from the public domain, copyright abandoned. Code was
 * subsequently included in the kernel, thus was re-licensed under the
 * GNU GPL v2.
 *
 * Oct 12, 2000 Matt Domsch <[email protected]>
 * Same crc32 function was used in 5 other places in the kernel.
 * I made one version, and deleted the others.
 * There are various incantations of crc32(). Some use a seed of 0 or ~0.
 * Some xor at the end with ~0. The generic crc32() function takes
 * seed as an argument, and doesn't xor at the end. Then individual
 * users can do whatever they need.
 * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
 * fs/jffs2 uses seed 0, doesn't xor with ~0.
 * fs/partitions/efi.c uses seed ~0, xor's with ~0.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

/* see: Documentation/staging/crc32.rst for a description of algorithms */
28
29 #include <linux/crc32.h>
30 #include <linux/crc32poly.h>
31 #include <linux/module.h>
32 #include <linux/types.h>
33
34 #include "crc32table.h"
35
36 MODULE_AUTHOR("Matt Domsch <[email protected]>");
37 MODULE_DESCRIPTION("Various CRC32 calculations");
38 MODULE_LICENSE("GPL");
39
crc32_le_base(u32 crc,const u8 * p,size_t len)40 u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
41 {
42 while (len--)
43 crc = (crc >> 8) ^ crc32table_le[(crc & 255) ^ *p++];
44 return crc;
45 }
46 EXPORT_SYMBOL(crc32_le_base);
47
crc32c_le_base(u32 crc,const u8 * p,size_t len)48 u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len)
49 {
50 while (len--)
51 crc = (crc >> 8) ^ crc32ctable_le[(crc & 255) ^ *p++];
52 return crc;
53 }
54 EXPORT_SYMBOL(crc32c_le_base);
55
56 /*
57 * This multiplies the polynomials x and y modulo the given modulus.
58 * This follows the "little-endian" CRC convention that the lsbit
59 * represents the highest power of x, and the msbit represents x^0.
60 */
gf2_multiply(u32 x,u32 y,u32 modulus)61 static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
62 {
63 u32 product = x & 1 ? y : 0;
64 int i;
65
66 for (i = 0; i < 31; i++) {
67 product = (product >> 1) ^ (product & 1 ? modulus : 0);
68 x >>= 1;
69 product ^= x & 1 ? y : 0;
70 }
71
72 return product;
73 }
74
75 /**
76 * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time
77 * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
78 * @len: The number of bytes. @crc is multiplied by x^(8*@len)
79 * @polynomial: The modulus used to reduce the result to 32 bits.
80 *
81 * It's possible to parallelize CRC computations by computing a CRC
82 * over separate ranges of a buffer, then summing them.
83 * This shifts the given CRC by 8*len bits (i.e. produces the same effect
84 * as appending len bytes of zero to the data), in time proportional
85 * to log(len).
86 */
crc32_generic_shift(u32 crc,size_t len,u32 polynomial)87 static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
88 u32 polynomial)
89 {
90 u32 power = polynomial; /* CRC of x^32 */
91 int i;
92
93 /* Shift up to 32 bits in the simple linear way */
94 for (i = 0; i < 8 * (int)(len & 3); i++)
95 crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
96
97 len >>= 2;
98 if (!len)
99 return crc;
100
101 for (;;) {
102 /* "power" is x^(2^i), modulo the polynomial */
103 if (len & 1)
104 crc = gf2_multiply(crc, power, polynomial);
105
106 len >>= 1;
107 if (!len)
108 break;
109
110 /* Square power, advancing to x^(2^(i+1)) */
111 power = gf2_multiply(power, power, polynomial);
112 }
113
114 return crc;
115 }
116
crc32_le_shift(u32 crc,size_t len)117 u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
118 {
119 return crc32_generic_shift(crc, len, CRC32_POLY_LE);
120 }
121
__crc32c_le_shift(u32 crc,size_t len)122 u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
123 {
124 return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
125 }
126 EXPORT_SYMBOL(crc32_le_shift);
127 EXPORT_SYMBOL(__crc32c_le_shift);
128
crc32_be_base(u32 crc,const u8 * p,size_t len)129 u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len)
130 {
131 while (len--)
132 crc = (crc << 8) ^ crc32table_be[(crc >> 24) ^ *p++];
133 return crc;
134 }
135 EXPORT_SYMBOL(crc32_be_base);
136