Lines Matching +full:5 +full:v0
118 //    5       (w0*x5 ^ w1*x4) << 40 ^   |   (y0*z5 ^ y1*z4) << 40 ^
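
Line 118 is rank 5 of the decomposition laid out in the surrounding comment: a 16x64-bit carry-less multiply built from 8x8-bit products, which is the arithmetic behind the pmull16x64_\p fallback invoked at line 335. Below is a self-checking C sketch of that decomposition; it only illustrates the math, not the kernel's vector code, and the helper names (clmul8, clmul16x64) are mine.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* 8x8 -> 16 bit carry-less (GF(2)[x]) multiply, standing in for an 8-bit PMULL. */
static uint16_t clmul8(uint8_t a, uint8_t b)
{
    uint16_t r = 0;

    for (int i = 0; i < 8; i++)
        if (b >> i & 1)
            r ^= (uint16_t)a << i;
    return r;
}

/*
 * 16x64 -> 80 bit carry-less multiply assembled from ranks 0..8 as in the
 * comment above: rank k contributes (w0*x_k ^ w1*x_{k-1}) << 8k, with the
 * out-of-range x_{-1} and x_8 taken as zero.
 */
static void clmul16x64(uint16_t w, uint64_t x, uint64_t *lo, uint64_t *hi)
{
    uint8_t w0 = w & 0xff, w1 = w >> 8;

    *lo = *hi = 0;
    for (int k = 0; k <= 8; k++) {
        uint64_t rank = 0;

        if (k < 8)
            rank ^= clmul8(w0, (uint8_t)(x >> (8 * k)));
        if (k > 0)
            rank ^= clmul8(w1, (uint8_t)(x >> (8 * (k - 1))));
        if (8 * k < 64)
            *lo ^= rank << (8 * k);       /* part landing below bit 64 */
        if (8 * k + 16 > 64)
            *hi ^= rank >> (64 - 8 * k);  /* part spilling past bit 63 */
    }
}

int main(void)
{
    uint16_t w = 0x8bb7;                  /* arbitrary test operands */
    uint64_t x = 0x0123456789abcdefULL;
    uint64_t lo = 0, hi = 0, lo2, hi2;

    /* bit-serial 16x64 carry-less multiply as ground truth */
    for (int i = 0; i < 16; i++)
        if (w >> i & 1) {
            lo ^= x << i;
            if (i)
                hi ^= x >> (64 - i);
        }

    clmul16x64(w, x, &lo2, &hi2);
    assert(lo == lo2 && hi == hi2);
    printf("0x%016llx%016llx\n", (unsigned long long)hi, (unsigned long long)lo);
    return 0;
}
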
223 CPU_LE( rev64 v0.16b, v0.16b )
231 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
243 eor v0.16b, v0.16b, v8.16b
252 // While >= 128 data bytes remain (not counting v0-v7), fold the 128
253 // bytes v0-v7 into them, storing the result back into v0-v7.
255 fold_32_bytes \p, v0, v1
263 // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.
268 fold_16_bytes \p, v0, v4
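
Lines 252-268 are the folding phase: fold_32_bytes and fold_16_bytes multiply the data accumulated in v0-v7 by precomputed values of the form x^k mod G(x) and XOR the products into the bytes that follow, so only one 16-byte chunk ever needs a full reduction. The identity this rests on is A(x)*x^t = A(x)*(x^t mod G(x)) (mod G(x)). Here is a reduced-width, self-checking C sketch of that identity; the 10-byte message, the 4-byte/6-byte split, and the helper names are all mine.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <assert.h>

/* Bitwise CRC-16/T10-DIF (init 0), using G(x) from line 442 with the x^16 term implicit. */
static uint16_t crc16_t10dif_ref(const uint8_t *p, size_t len)
{
    uint16_t crc = 0;

    while (len--) {
        crc ^= (uint16_t)*p++ << 8;
        for (int i = 0; i < 8; i++)
            crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7 : crc << 1;
    }
    return crc;
}

/* x^n mod G(x), the kind of constant the fold steps use */
static uint16_t xpow_mod_g(int n)
{
    uint32_t r = 1;

    while (n--) {
        r <<= 1;
        if (r & 0x10000)
            r ^= 0x18BB7;
    }
    return (uint16_t)r;
}

/* bit-serial carry-less multiply over GF(2)[x] */
static uint64_t clmul(uint64_t a, uint64_t b)
{
    uint64_t r = 0;

    for (int i = 0; i < 64; i++)
        if (b >> i & 1)
            r ^= a << i;
    return r;
}

int main(void)
{
    const uint8_t msg[10] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4, 5, 6 };
    uint64_t A = (uint64_t)msg[0] << 24 | (uint64_t)msg[1] << 16 | msg[2] << 8 | msg[3];
    uint64_t B = 0;

    for (int i = 0; i < 6; i++)
        B = B << 8 | msg[4 + i];

    /* A contributes A(x) * x^48 to the message polynomial; folding replaces
     * that with the congruent A(x) * (x^48 mod G(x)), XORed into B. */
    uint64_t folded = clmul(A, xpow_mod_g(48)) ^ B;

    uint8_t fb[6];
    for (int i = 0; i < 6; i++)
        fb[i] = (uint8_t)(folded >> (8 * (5 - i)));

    /* The CRC of the folded 6 bytes equals the CRC of the whole 10-byte message. */
    assert(crc16_t10dif_ref(fb, 6) == crc16_t10dif_ref(msg, 10));
    printf("crc = 0x%04x\n", crc16_t10dif_ref(msg, 10));
    return 0;
}

The kernel works at 128-byte and 16-byte granularity with table-driven constants; the tiny split above is only so that every intermediate fits in a uint64_t.
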
291 CPU_LE( rev64 v0.16b, v0.16b )
292 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
293 eor v7.16b, v7.16b, v0.16b
310 // v0 = last 16 original data bytes
313 CPU_LE( rev64 v0.16b, v0.16b )
314 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
330 // v2 = second chunk: 'len' bytes from v0 (low-order bytes),
332 bsl v2.16b, v1.16b, v0.16b
335 pmull16x64_\p fold_consts, v3, v0
336 eor v7.16b, v3.16b, v0.16b
351 movi v0.16b, #0
352 mov v0.h[7], init_crc
353 eor v7.16b, v7.16b, v0.16b
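
Lines 351-353 seed the CRC: the initial value is placed in one 16-bit lane of a zeroed vector and XORed into the first block of data. That works because an MSB-first CRC started from value I over an n-byte message equals a zero-initialized CRC over the same message with I XORed into its first 16 bits; the lane index and the CPU_LE byte reversal handle endianness in the assembly, which the plain big-endian C sketch below ignores. The message bytes and the 0x1234 init value are arbitrary.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>

/* Bitwise CRC-16/T10-DIF with an explicit initial value. */
static uint16_t crc16_t10dif(uint16_t crc, const uint8_t *p, size_t len)
{
    while (len--) {
        crc ^= (uint16_t)*p++ << 8;
        for (int i = 0; i < 8; i++)
            crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7 : crc << 1;
    }
    return crc;
}

int main(void)
{
    uint8_t msg[16], seeded[16];
    uint16_t init = 0x1234;

    for (int i = 0; i < 16; i++)
        msg[i] = (uint8_t)(i * 7 + 3);

    /* Same trick as the movi/mov/eor above: fold init into the first 16 data bits. */
    memcpy(seeded, msg, sizeof(msg));
    seeded[0] ^= init >> 8;
    seeded[1] ^= init & 0xff;

    assert(crc16_t10dif(init, msg, 16) == crc16_t10dif(0, seeded, 16));
    printf("crc = 0x%04x\n", crc16_t10dif(init, msg, 16));
    return 0;
}
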
393 .align 5
412 ext v0.16b, v2.16b, v7.16b, #8
414 eor v0.16b, v0.16b, v7.16b // + low bits * x^64
418 ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits
419 mov v0.s[3], v2.s[0] // zero high 32 bits
421 eor v0.16b, v0.16b, v1.16b // + low bits
427 pmull2 v1.1q, v0.2d, fold_consts.2d // high 32 bits * floor(x^48 / G(x))
430 ushr v0.2d, v0.2d, #48
431 eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits
432 // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.
434 umov w0, v0.h[0]
442 // G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
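
Lines 427-434 perform the final Barrett reduction: multiply the 32 significant high bits of the remaining value by floor(x^48 / G(x)), drop 32 bits to obtain the quotient, multiply the quotient by G(x), and XOR it back in, leaving the 16-bit remainder that umov copies into w0. Below is a scalar C sketch of those steps, cross-checked against a bitwise CRC; the Barrett constant is recomputed by long division here rather than loaded from a table, and every helper name is mine.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <assert.h>

#define G 0x18BB7ULL   /* G(x) from line 442, including the x^16 term */

/* bit-serial carry-less multiply over GF(2)[x] */
static uint64_t clmul(uint64_t a, uint64_t b)
{
    uint64_t r = 0;

    for (int i = 0; i < 64; i++)
        if (b >> i & 1)
            r ^= a << i;
    return r;
}

/* floor(x^48 / G(x)) by polynomial long division: the Barrett constant, 33 bits */
static uint64_t barrett_mu(void)
{
    uint64_t q = 0, r = 0;

    for (int i = 48; i >= 0; i--) {
        r = r << 1 | (i == 48);   /* bring down the next bit of x^48 */
        q <<= 1;
        if (r & (1ULL << 16)) {
            r ^= G;
            q |= 1;
        }
    }
    return q;
}

/* Reduce R(x) = M(x) * x^16 (degree < 48) modulo G(x), as in lines 427-432. */
static uint16_t barrett_reduce48(uint64_t R)
{
    uint64_t A = R >> 16;                       /* high 32 bits of R            */
    uint64_t Q = clmul(A, barrett_mu()) >> 32;  /* exactly floor(R / G(x)) here */

    return (uint16_t)(R ^ clmul(Q, G));         /* R + Q*G = R mod G            */
}

/* Bitwise reference CRC-16/T10-DIF (init 0) for cross-checking. */
static uint16_t crc16_ref(const uint8_t *p, size_t len)
{
    uint16_t crc = 0;

    while (len--) {
        crc ^= (uint16_t)*p++ << 8;
        for (int i = 0; i < 8; i++)
            crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7 : crc << 1;
    }
    return crc;
}

int main(void)
{
    const uint8_t msg[4] = { 0xde, 0xad, 0xbe, 0xef };   /* arbitrary 32-bit M(x) */
    uint64_t M = (uint64_t)msg[0] << 24 | (uint64_t)msg[1] << 16 | msg[2] << 8 | msg[3];

    assert(barrett_reduce48(M << 16) == crc16_ref(msg, 4));
    printf("crc = 0x%04x\n", barrett_reduce48(M << 16));
    return 0;
}
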