2 // Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
14 // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
62 // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
75 .arch armv8-a
76 .fpu crypto-neon-fp-armv8
116 * Pairwise long polynomial multiplication of two 16-bit values
120 * by two 64-bit values
125 * significant. The resulting 80-bit vectors are XOR'ed together.
127 * This can be implemented using 8x8 long polynomial multiplication, by
128 * reorganizing the input so that each pairwise 8x8 multiplication
134 * 1 (w0*x1 ^ w1*x0) << 8 ^ | (y0*z1 ^ y1*z0) << 8 ^
141 * 8 w1*x7 << 64 | y1*z7 << 64
148 * and after performing 8x8->16 bit long polynomial multiplication of
150 * we obtain the following four vectors of 16-bit elements:
161 * final 80-bit result.
164 vext.8 q11, \v64, \v64, #1
166 vuzp.8 q11, \v64
167 vtbl.8 d24, {\v16\()_L-\v16\()_H}, d24
168 vtbl.8 d25, {\v16\()_L-\v16\()_H}, d25
185 vext.8 q12, q12, q12, #14
186 vext.8 q14, q14, q14, #15
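
As a cross-check of the rank decomposition described in the comment above, here is a minimal C sketch (not kernel code; clmul8(), clmul16x64() and clmul16x64_ref() are hypothetical helper names) that builds one 16x64-bit carryless multiply from 8x8->16-bit products, rank by rank, and compares it against a bitwise reference. The assembly computes two such 80-bit products in parallel and XORs them together.

#include <stdint.h>
#include <stdio.h>

/* 8x8 -> 16-bit long polynomial (carryless) multiply, bit by bit */
static uint16_t clmul8(uint8_t a, uint8_t b)
{
	uint16_t r = 0;

	for (int i = 0; i < 8; i++)
		if (b >> i & 1)
			r ^= (uint16_t)a << i;
	return r;
}

/* 16x64 -> 80-bit multiply via the rank decomposition: rank k collects
 * w0*x[k] ^ w1*x[k-1], shifted into place at bit 8*k */
static void clmul16x64(uint16_t w, uint64_t x, uint64_t *lo, uint16_t *hi)
{
	uint8_t wb[2] = { w & 0xff, w >> 8 };

	*lo = 0;
	*hi = 0;
	for (int k = 0; k <= 8; k++) {
		uint16_t term = 0;

		if (k < 8)
			term ^= clmul8(wb[0], x >> (8 * k));
		if (k > 0)
			term ^= clmul8(wb[1], x >> (8 * (k - 1)));
		if (8 * k < 64)
			*lo ^= (uint64_t)term << (8 * k);
		else
			*hi ^= term;			/* rank 8 */
		if (8 * k < 64 && 8 * k + 16 > 64)
			*hi ^= term >> (64 - 8 * k);	/* rank 7 spill */
	}
}

/* bitwise reference for the same 16x64 -> 80-bit product */
static void clmul16x64_ref(uint16_t w, uint64_t x, uint64_t *lo, uint16_t *hi)
{
	*lo = 0;
	*hi = 0;
	for (int i = 0; i < 16; i++)
		if (w >> i & 1) {
			*lo ^= x << i;
			if (i)
				*hi ^= x >> (64 - i);
		}
}

int main(void)
{
	uint64_t lo1, lo2;
	uint16_t hi1, hi2;

	clmul16x64(0x8bb7, 0x0123456789abcdefULL, &lo1, &hi1);
	clmul16x64_ref(0x8bb7, 0x0123456789abcdefULL, &lo2, &hi2);
	printf("%04x%016llx\n%04x%016llx\n", hi1, (unsigned long long)lo1,
	       hi2, (unsigned long long)lo2);
	return hi1 != hi2 || lo1 != lo2;
}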
200 vld1.64 {q8-q9}, [buf]!
205 CPU_LE( vrev64.8 q8, q8 )
206 CPU_LE( vrev64.8 q9, q9 )
210 veor.8 \reg1, \reg1, q8
211 veor.8 \reg2, \reg2, q9
220 veor.8 \dst_reg, \dst_reg, \src_reg
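
The fold step above implements the following operation, shown here as a plain C sketch. It assumes the byte-reflected register layout that the vrev64.8 instructions set up, so that bit 0 of the low half is the coefficient of x^0; clmul64() is a hypothetical bitwise stand-in for vmull.p64, not a kernel API.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t lo, hi; } u128;

/* 64x64 -> 128-bit carryless multiply: a bitwise model of vmull.p64 */
static u128 clmul64(uint64_t a, uint64_t b)
{
	u128 r = { 0, 0 };

	for (int i = 0; i < 64; i++)
		if (b >> i & 1) {
			r.lo ^= a << i;
			if (i)
				r.hi ^= a >> (64 - i);
		}
	return r;
}

/* Fold acc across 'dist' bytes into the next 16-byte block 'data', with
 *   c_lo = x^(8*dist) mod G(x),  c_hi = x^(8*dist+64) mod G(x).
 * The result is congruent to acc * x^(8*dist) ^ data (mod G(x)). */
static u128 fold16(u128 acc, u128 data, uint64_t c_lo, uint64_t c_hi)
{
	u128 a = clmul64(acc.lo, c_lo);		/* low half  * x^(8*dist)    */
	u128 b = clmul64(acc.hi, c_hi);		/* high half * x^(8*dist+64) */

	return (u128){ a.lo ^ b.lo ^ data.lo, a.hi ^ b.hi ^ data.hi };
}

int main(void)
{
	u128 acc  = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };
	u128 data = { 0x1111111111111111ULL, 0x2222222222222222ULL };
	/* fold across 16 bytes, using the constants from the table below:
	 * x^128 mod G(x) = 0xa010, x^192 mod G(x) = 0x1faa */
	u128 r = fold16(acc, data, 0xa010, 0x1faa);

	printf("%016llx%016llx\n", (unsigned long long)r.hi,
	       (unsigned long long)r.lo);
	return 0;
}

The main loop uses the same operation with dist = 128 and the x^(8*128) constants; the per-16-byte folds use dist = 16.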
232 vld1.64 {q0-q1}, [buf]!
233 vld1.64 {q2-q3}, [buf]!
234 vld1.64 {q4-q5}, [buf]!
235 vld1.64 {q6-q7}, [buf]!
236 CPU_LE( vrev64.8 q0, q0 )
237 CPU_LE( vrev64.8 q1, q1 )
238 CPU_LE( vrev64.8 q2, q2 )
239 CPU_LE( vrev64.8 q3, q3 )
240 CPU_LE( vrev64.8 q4, q4 )
241 CPU_LE( vrev64.8 q5, q5 )
242 CPU_LE( vrev64.8 q6, q6 )
243 CPU_LE( vrev64.8 q7, q7 )
265 // While >= 128 data bytes remain (not counting q0-q7), fold the 128
266 // bytes q0-q7 into them, storing the result back into q0-q7.
275 // Now fold the 112 bytes in q0-q6 into the 16 bytes in q7.
293 adds len, len, #(128-16)
301 CPU_LE( vrev64.8 q0, q0 )
303 veor.8 q7, q7, q0
324 CPU_LE( vrev64.8 q0, q0 )
327 // q1 = high order part of second chunk: q7 left-shifted by 'len' bytes.
330 vld1.8 {q2}, [r1]
331 vtbl.8 q1l, {q7l-q7h}, q2l
332 vtbl.8 q1h, {q7l-q7h}, q2h
334 // q3 = first chunk: q7 right-shifted by '16-len' bytes.
336 veor.8 q2, q2, q3
337 vtbl.8 q3l, {q7l-q7h}, q2l
338 vtbl.8 q3h, {q7l-q7h}, q2h
340 // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
343 // q2 = second chunk: 'len' bytes from q0 (low-order bytes),
344 // then '16-len' bytes from q1 (high-order bytes).
345 vbsl.8 q2, q1, q0
349 veor.8 q7, q3, q2
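
The vtbl/vbsl sequence above handles 1 <= len <= 15 trailing bytes without reading past the end of the buffer. In byte-array terms it amounts to the following sketch (hypothetical names, not kernel code; arrays are written most significant byte first, the reverse of the NEON lane order): the 16-byte state and the trailing bytes are re-split into a 'first' chunk, which is folded across 16 bytes, and a 16-byte 'second' chunk that absorbs it, which is what the final veor.8 q7, q3, q2 combines.

#include <stdint.h>
#include <string.h>

/* acc[16]: folded CRC state; tail[len]: the final 1..15 message bytes.
 * first  <- acc right-shifted by '16-len' bytes (q3)
 * second <- acc left-shifted by 'len' bytes plus the new bytes (q2) */
static void split_partial(const uint8_t acc[16], const uint8_t *tail,
			  int len, uint8_t first[16], uint8_t second[16])
{
	uint8_t tmp[31];

	memcpy(tmp, acc, 16);
	memcpy(tmp + 16, tail, len);

	memset(first, 0, 16);
	memcpy(first + 16 - len, tmp, len);	/* top 'len' bytes of acc */
	memcpy(second, tmp + len, 16);		/* the next 16 bytes      */
}

int main(void)
{
	uint8_t acc[16], tail[5] = { 0xde, 0xad, 0xbe, 0xef, 0x42 };
	uint8_t first[16], second[16];

	for (int i = 0; i < 16; i++)
		acc[i] = i;
	split_partial(acc, tail, 5, first, second);
	/* 'first' is then folded across 16 bytes into 'second',
	 * producing the new 16-byte state (q7) */
	return 0;
}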
359 CPU_LE( vrev64.8 q7, q7 )
365 veor.8 q7h, q7h, q0h
367 // Load the fold-across-16-bytes constants.
388 // Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC.
390 // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
393 // Fold the high 64 bits into the low 64 bits, while also multiplying by
394 // x^64. This produces a 128-bit value congruent to x^64 * M(x) and
395 // whose low 48 bits are congruent to x^48 * M(x) mod G(x).
396 vmull.p64 q0, q7h, FOLD_CONST_H // high bits * x^48 * (x^80 mod G(x))
397 veor.8 q0h, q0h, q7l // + low bits * x^64
399 // Fold the high 32 bits into the low 96 bits. This produces a 96-bit
400 // value congruent to x^48 * M(x) and whose low 48 bits are congruent
401 // to x^48 * M(x) mod G(x).
404 vmull.p64 q1, q1l, FOLD_CONST_L // high 32 bits * x^48 * (x^48 mod G(x))
405 veor.8 q0, q0, q1 // + low bits
407 // Load G(x) and floor(x^48 / G(x)).
411 vmull.p64 q1, q0h, FOLD_CONST_H // high 32 bits * floor(x^48 / G(x))
413 vmull.p64 q1, q1l, FOLD_CONST_L // *= G(x)
415 veor.8 q0l, q0l, q1l // + low 16 nonzero bits
416 // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0.
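
The last three multiplies are a Barrett reduction. Here is a self-contained C sketch of the same arithmetic (the register slicing differs from the vmull.p64 sequence, and barrett48() and mod_ref() are hypothetical names): a 48-bit polynomial T(x) is reduced modulo G(x) using the precomputed floor(x^48 / G(x)), and the result is checked against bitwise long division.

#include <stdint.h>
#include <stdio.h>

#define G	0x18bb7ULL		/* G(x), 17 bits */
#define MU	0x1f65a57f8ULL		/* floor(x^48 / G(x)), 33 bits */

/* carryless multiply, 128-bit result in hi:lo */
static void clmul(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
	*lo = *hi = 0;
	for (int i = 0; i < 64; i++)
		if (b >> i & 1) {
			*lo ^= a << i;
			if (i)
				*hi ^= a >> (64 - i);
		}
}

/* Barrett reduction of T(x), deg T <= 47, modulo G(x) */
static uint16_t barrett48(uint64_t t)
{
	uint64_t lo, hi, q;

	clmul(t >> 16, MU, &lo, &hi);	/* floor(T / x^16) * mu        */
	q = (lo >> 32) | (hi << 32);	/* floor(... / x^32) = T div G */
	clmul(q, G, &lo, &hi);		/* q * G                       */
	return (uint16_t)(t ^ lo);	/* low 16 bits = T mod G       */
}

/* bitwise long-division reference for T mod G */
static uint16_t mod_ref(uint64_t t)
{
	for (int i = 47; i >= 16; i--)
		if (t >> i & 1)
			t ^= G << (i - 16);
	return (uint16_t)t;
}

int main(void)
{
	uint64_t t = 0x123456789abcULL;	/* arbitrary 48-bit test value */

	printf("%04x %04x\n", barrett48(t), mod_ref(t));
	return barrett48(t) != mod_ref(t);
}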
428 CPU_LE( vrev64.8 q7, q7 )
438 // G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
440 .quad 0x0000000000006123 // x^(8*128) mod G(x)
441 .quad 0x0000000000002295 // x^(8*128+64) mod G(x)
443 .quad 0x0000000000001069 // x^(4*128) mod G(x)
444 .quad 0x000000000000dd31 // x^(4*128+64) mod G(x)
446 .quad 0x000000000000857d // x^(2*128) mod G(x)
447 .quad 0x0000000000007acc // x^(2*128+64) mod G(x)
449 .quad 0x000000000000a010 // x^(1*128) mod G(x)
450 .quad 0x0000000000001faa // x^(1*128+64) mod G(x)
452 .quad 0x1368000000000000 // x^48 * (x^48 mod G(x))
453 .quad 0x2d56000000000000 // x^48 * (x^80 mod G(x))
455 .quad 0x0000000000018bb7 // G(x)
456 .quad 0x00000001f65a57f8 // floor(x^48 / G(x))
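
All of the constants above are derived from G(x) alone. A short C sketch (hypothetical helpers, not how the kernel generates them) that recomputes each table entry by shift-and-reduce and by polynomial long division:

#include <stdint.h>
#include <stdio.h>

#define G	0x18bb7UL	/* x^16 + x^15 + x^11 + ... + x^1 + x^0 */

/* x^n mod G(x): multiply by x one step at a time, reducing at degree 16 */
static uint32_t xn_mod_g(int n)
{
	uint32_t r = 1;

	while (n--) {
		r <<= 1;
		if (r & 0x10000)
			r ^= G;
	}
	return r;
}

/* floor(x^48 / G(x)) by polynomial long division */
static uint64_t x48_div_g(void)
{
	uint64_t n = 1ULL << 48, q = 0;

	for (int i = 48; i >= 16; i--)
		if (n >> i & 1) {
			q |= 1ULL << (i - 16);
			n ^= (uint64_t)G << (i - 16);
		}
	return q;
}

int main(void)
{
	static const int dist[] = { 8 * 128, 8 * 128 + 64, 4 * 128,
				    4 * 128 + 64, 2 * 128, 2 * 128 + 64,
				    1 * 128, 1 * 128 + 64 };

	for (int i = 0; i < 8; i++)
		printf("x^%-4d mod G(x) = 0x%04x\n", dist[i],
		       xn_mod_g(dist[i]));
	/* the final-fold constants are stored pre-shifted by 48 bits */
	printf("x^48 * (x^48 mod G(x)) = 0x%04x << 48\n", xn_mod_g(48));
	printf("x^48 * (x^80 mod G(x)) = 0x%04x << 48\n", xn_mod_g(80));
	printf("floor(x^48 / G(x))     = 0x%09llx\n",
	       (unsigned long long)x48_div_g());
	return 0;
}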
458 // For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
459 // len] is the index vector to shift left by 'len' bytes, and is also {0x80,
460 // ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
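
A C model of how this table drives the vtbl shifts: tbl() mimics the NEON table lookup, where any index with bit 7 set yields zero, and the table bytes {0x00, 0x81..0x8f, 0x00..0x0f} match the .Lbyteshift_table data that follows this comment in the file.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t byteshift_table[32] = {
	0x00, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
};

/* vtbl semantics: index < 16 selects src[index], anything else gives 0 */
static void tbl(uint8_t dst[16], const uint8_t src[16], const uint8_t idx[16])
{
	for (int i = 0; i < 16; i++)
		dst[i] = idx[i] < 16 ? src[idx[i]] : 0;
}

int main(void)
{
	uint8_t src[16], idx[16], left[16], right[16];
	int len = 5;			/* 1 <= len <= 15 */

	for (int i = 0; i < 16; i++)
		src[i] = i + 1;

	memcpy(idx, &byteshift_table[16 - len], 16);
	tbl(left, src, idx);		/* src shifted left by 'len' bytes */

	for (int i = 0; i < 16; i++)
		idx[i] ^= 0x80;
	tbl(right, src, idx);		/* src shifted right by '16 - len' */

	for (int i = 0; i < 16; i++)
		printf("%02x %02x\n", left[i], right[i]);
	return 0;
}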