Lines Matching +full:23 +full:- +full:64

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 # Copyright 2023- IBM Corp. All rights reserved
10 # Poly1305 - this version mainly uses vector/VSX and scalar instructions
11 # - 26-bit limbs
12 # - Handles multiple 64-byte blocks.
17 # p = 2^130 - 5
25 # 07/22/21 - this revision is based on the above sum of products. Set up r^4, r^3, r^2, r and s3, s2, …
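The "sum of products" this header refers to is, as far as it can be read from the powers it lists, the usual rewrite of the Poly1305 Horner loop so that four 16-byte blocks (one 64-byte chunk) are absorbed per iteration using r^4, r^3, r^2 and r; the s3, s2, … values would then be the 5-scaled companions of those powers used during reduction in the limb representation. A small Python sketch of just the power-of-r identity, with made-up example values (nothing below comes from this file):

P = (1 << 130) - 5

def horner(h, blocks, r):
    # reference: one block at a time, h = (h + m) * r (mod p)
    for m in blocks:
        h = ((h + m) * r) % P
    return h

def four_blocks_per_step(h, blocks, r):
    # same result, consuming 4 blocks (64 bytes) per iteration with r^4..r
    r2 = r * r % P
    r3 = r2 * r % P
    r4 = r3 * r % P
    for i in range(0, len(blocks), 4):
        m1, m2, m3, m4 = blocks[i:i + 4]
        h = ((h + m1) * r4 + m2 * r3 + m3 * r2 + m4 * r) % P
    return h

import random
random.seed(0)
r = random.getrandbits(124)                           # stand-in for a clamped r
blocks = [random.getrandbits(129) for _ in range(8)]  # 8 padded 16-byte blocks
assert horner(0, blocks, r) == four_blocks_per_step(0, blocks, r)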
56 #include <asm/asm-offsets.h>
57 #include <asm/asm-compat.h>
95 stdu 1,-752(1) # create 752-byte stack frame, save back chain
106 SAVE_GPR 23, 184, 1
120 SAVE_VRS 23, 48, 9
121 SAVE_VRS 24, 64, 9
139 SAVE_VSX 23, 336, 9
155 RESTORE_VRS 23, 48, 9
156 RESTORE_VRS 24, 64, 9
174 RESTORE_VSX 23, 336, 9
193 RESTORE_GPR 23, 184, 1
486 lvx 25, 0, 10 # v25 - mask
490 lxv 25, 64(10) # vs25
530 cmpdi 5, 64 # compare remaining length (r5) with 64
587 vaddudm 23, 7, 12
615 vmrgow 7, 12, 23
619 addi 5, 5, -64 # len -= 64
620 addi 21, 21, 64 # offset += 64
622 li 9, 64
633 # h3 = (h1 + m3) * r^2, h4 = (h2 + m4) * r^2 --> (h0 + m1) r^4 + (h3 + m3) r^2, (h0 + m2) r^4 + (h…
635 # h5 = (h3 + m5) * r^2, h6 = (h4 + m6) * r^2 -->
636 # h7 = (h5 + m7) * r^2, h8 = (h6 + m8) * r^1 --> m5 * r^4 + m6 * r^3 + m7 * r^2 + m8 * r
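The recurrence in these comments splits the eight blocks across two accumulators: one lane takes the odd-numbered blocks, the other the even-numbered ones, each lane is advanced by r^2 per step, and only the very last step of the even lane multiplies by r, so the sum of the two lanes equals the plain one-block-at-a-time result. A rough Python check of that equivalence (variable names and test values are mine, not from this file):

P = (1 << 130) - 5

def horner(blocks, r):
    h = 0
    for m in blocks:
        h = ((h + m) * r) % P
    return h

def two_lanes(blocks, r):
    # odd-numbered blocks in lane A, even-numbered in lane B,
    # each lane multiplied by r^2 per step, last lane-B step by r
    m1, m2, m3, m4, m5, m6, m7, m8 = blocks
    r2 = r * r % P
    hA = hB = 0
    for ma, mb in ((m1, m2), (m3, m4), (m5, m6)):
        hA = (hA + ma) * r2 % P
        hB = (hB + mb) * r2 % P
    hA = (hA + m7) * r2 % P
    hB = (hB + m8) * r % P
    return (hA + hB) % P

import random
random.seed(1)
r = random.getrandbits(124)
blocks = [random.getrandbits(129) for _ in range(8)]
assert two_lanes(blocks, r) == horner(blocks, r)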
700 vsld 23, 16, 13
701 vor 22, 22, 23
708 vsrd 23, 15, 13 # >> 14
709 vsrd 24, 23, 31 # >> 26, a4
710 vand 23, 23, 25 # a3
718 vaddudm 7, 7, 23
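The ">> 14" comes from the radix-2^26 layout the header advertises: limb a3 covers bits 78..103 of the padded block, and bit 78 is bit 14 of the high doubleword, so shifting that doubleword right by 14 lines a3 up for the 26-bit mask, and a further ">> 26" yields a4 (the vsld/vor pair above appears to stitch together the limb that straddles the doubleword boundary). A hedged Python sketch of the same split; the function and the explicit pad-bit handling are illustrative, not taken from this file:

def radix26_limbs(block16: bytes, pad_bit: bool = True) -> list[int]:
    # split one 16-byte little-endian block (plus the 2^128 pad bit the
    # algorithm appends) into five 26-bit limbs a0..a4
    m = int.from_bytes(block16, "little")
    if pad_bit:
        m |= 1 << 128
    mask26 = (1 << 26) - 1
    return [(m >> (26 * i)) & mask26 for i in range(5)]

# the limbs recombine to the original padded value
block = bytes(range(16))
limbs = radix26_limbs(block)
assert sum(a << (26 * i) for i, a in enumerate(limbs)) == \
       int.from_bytes(block, "little") | (1 << 128)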
729 addi 5, 5, -64 # len -= 64
730 addi 21, 21, 64 # offset += 64
839 # The following functions implement the 64 x 64-bit multiplication for Poly1305.
857 add 19, 21, 10 # s1: r19 = (r1 >> 2) * 5
906 sldi 23, 22, 0x2 # * 4
907 add 23, 23, 22 # (h2 >> 2) * 5
908 addc 27, 27, 23 # h0
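The scalar comments around here rely on the two standard mod 2^130 - 5 folding tricks: s1 = (r1 >> 2) * 5 is legal because key clamping forces r1 to be a multiple of 4, so r1 * 2^128 = (r1 >> 2) * 2^130 ≡ (r1 >> 2) * 5, and the sldi/add pair just above appears to be the same ×5 fold applied to the carry that lands at or above bit 130. A Python sketch of the arithmetic only; the assembly's register allocation and exact carry scheduling are not reproduced:

P = (1 << 130) - 5
M64 = (1 << 64) - 1

def mul64_reduce(h0, h1, h2, r0, r1):
    # h = h0 + h1*2^64 + h2*2^128 (h2 small), r = r0 + r1*2^64, r clamped
    s1 = (r1 >> 2) * 5              # r1*2^128 == s1 (mod p), since 4 | r1
    d0 = h0 * r0 + h1 * s1
    d1 = h0 * r1 + h1 * r0 + h2 * s1
    d2 = h2 * r0
    h0, c = d0 & M64, d0 >> 64      # propagate carries into 64-bit limbs
    d1 += c
    h1, c = d1 & M64, d1 >> 64
    d2 += c
    h0 += (d2 >> 2) * 5             # fold bits >= 2^130: x*2^130 == 5*x (mod p)
    h2 = d2 & 3
    h1 += h0 >> 64
    h0 &= M64
    h2 += h1 >> 64
    h1 &= M64
    return h0, h1, h2

import random
random.seed(2)
for _ in range(1000):
    r = random.getrandbits(128) & 0x0ffffffc0ffffffc0ffffffc0fffffff
    h = random.getrandbits(130)
    a0, a1, a2 = mul64_reduce(h & M64, (h >> 64) & M64, h >> 128,
                              r & M64, r >> 64)
    assert (a0 + (a1 << 64) + (a2 << 128)) % P == h * r % P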
923 # - no highbit if final leftover block (highbit = 0)
931 stdu 1,-400(1) # create 400-byte stack frame, save back chain
942 SAVE_GPR 23, 184, 1
1008 RESTORE_GPR 23, 184, 1
1039 # h + 5 + (-p)
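Since p = 2^130 - 5, adding 5 to the accumulator carries into bit 130 exactly when h >= p, which is what the "h + 5 + (-p)" comparison exploits to pick between h and h - p; the real code does the selection with a mask rather than a branch so it stays constant-time. A minimal integer-level sketch of the same selection, assuming h has already been partially reduced below 2*p and s is the second 128-bit half of the key:

P = (1 << 130) - 5

def poly1305_emit(h, s):
    g = h + 5                          # h + 5 == (h - p) + 2^130
    if g >> 130:                       # carry into bit 130 <=> h >= p
        h = g & ((1 << 130) - 1)       # keep h - p
    return (h + s) & ((1 << 128) - 1)  # tag = (h + s) mod 2^128

assert poly1305_emit(P - 1, 0) == (P - 1) & ((1 << 128) - 1)  # h < p: kept as-is
assert poly1305_emit(P, 0) == 0                               # h == p: wraps to 0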