#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro\@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# June 2015
#
# ChaCha20 for ARMv8.
#
# Performance in cycles per byte out of a large buffer.
#
#			IALU/gcc-4.9    3xNEON+1xIALU	6xNEON+2xIALU
#
# Apple A7		5.50/+49%       3.33            1.70
# Cortex-A53		8.40/+80%       4.72		4.72(*)
# Cortex-A57		8.06/+43%       4.90            4.43(**)
# Denver		4.50/+82%       2.63		2.67(*)
# X-Gene		9.50/+46%       8.82		8.89(*)
# Mongoose		8.00/+44%	3.64		3.25
# Kryo			8.17/+50%	4.83		4.65
#
# (*)	it's expected that doubling the interleave factor doesn't help
#	all processors, only those with higher NEON latency and
#	higher instruction issue rate;
# (**)	the expected improvement was actually higher;

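# This file emits three entry points, matching the columns above:
# ChaCha20_ctr32_nohw (pure integer ALU), ChaCha20_ctr32_neon
# (3xNEON+1xIALU, taken for inputs below 512 bytes) and ChaCha20_512_neon
# (6xNEON+2xIALU, entered from the NEON path once 512 or more bytes remain).
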
$flavour=shift;
$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
  my $arg = pop;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}
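
# Any call to an undefined sub, e.g. &add_32(@x[0],@x[0],@x[1]), lands in the
# AUTOLOAD thunk above: the sub name becomes the mnemonic ("add_32" -> "add.32"),
# a bare numeric last argument gets a "#" prefix, and the resulting line is
# appended to $code.  Suffixed pseudo-mnemonics such as add.32 or rev32.16 are
# rewritten into real AArch64 syntax by the post-processing loop at the end of
# this file.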

my ($out,$inp,$len,$key,$ctr) = map("x$_",(0..4));

my @x=map("x$_",(5..17,19..21));
my @d=map("x$_",(22..28,30));
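# @x names the 16 working state words, kept in x5-x17 and x19-x21 (x18 is
# skipped, being reserved as a platform register on some targets); @d caches
# the initial state as eight 64-bit registers, two 32-bit words apiece:
# sigma in @d[0..1], the key in @d[2..5], counter and nonce in @d[6..7].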

sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

    (
	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",
	 "&add_32	(@x[$a1],@x[$a1],@x[$b1])",
	  "&add_32	(@x[$a2],@x[$a2],@x[$b2])",
	   "&add_32	(@x[$a3],@x[$a3],@x[$b3])",
	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",
	 "&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
	  "&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
	   "&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
	"&ror_32	(@x[$d0],@x[$d0],16)",
	 "&ror_32	(@x[$d1],@x[$d1],16)",
	  "&ror_32	(@x[$d2],@x[$d2],16)",
	   "&ror_32	(@x[$d3],@x[$d3],16)",

	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",
	 "&add_32	(@x[$c1],@x[$c1],@x[$d1])",
	  "&add_32	(@x[$c2],@x[$c2],@x[$d2])",
	   "&add_32	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",
	 "&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
	  "&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
	   "&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
	"&ror_32	(@x[$b0],@x[$b0],20)",
	 "&ror_32	(@x[$b1],@x[$b1],20)",
	  "&ror_32	(@x[$b2],@x[$b2],20)",
	   "&ror_32	(@x[$b3],@x[$b3],20)",

	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",
	 "&add_32	(@x[$a1],@x[$a1],@x[$b1])",
	  "&add_32	(@x[$a2],@x[$a2],@x[$b2])",
	   "&add_32	(@x[$a3],@x[$a3],@x[$b3])",
	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",
	 "&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
	  "&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
	   "&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
	"&ror_32	(@x[$d0],@x[$d0],24)",
	 "&ror_32	(@x[$d1],@x[$d1],24)",
	  "&ror_32	(@x[$d2],@x[$d2],24)",
	   "&ror_32	(@x[$d3],@x[$d3],24)",

	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",
	 "&add_32	(@x[$c1],@x[$c1],@x[$d1])",
	  "&add_32	(@x[$c2],@x[$c2],@x[$d2])",
	   "&add_32	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",
	 "&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
	  "&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
	   "&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
	"&ror_32	(@x[$b0],@x[$b0],25)",
	 "&ror_32	(@x[$b1],@x[$b1],25)",
	  "&ror_32	(@x[$b2],@x[$b2],25)",
	   "&ror_32	(@x[$b3],@x[$b3],25)"
    );
}
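# For reference, each of the four interleaved columns above computes the
# standard ChaCha quarter-round; the left rotations are expressed as right
# rotations by 32-n, since AArch64 has ror but no rol:
#	a += b; d ^= a; d = rol(d,16);
#	c += d; b ^= c; b = rol(b,12);
#	a += b; d ^= a; d = rol(d, 8);
#	c += d; b ^= c; b = rol(b, 7);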

$code.=<<___;
#include <openssl/arm_arch.h>

.section .rodata

.align	5
.Lsigma:
.quad	0x3320646e61707865,0x6b20657479622d32		// endian-neutral
.Lone:
.long	1,0,0,0
.asciz	"ChaCha20 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"

.text

.globl	ChaCha20_ctr32_nohw
.type	ChaCha20_ctr32_nohw,%function
.align	5
ChaCha20_ctr32_nohw:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adrp	@x[0],:pg_hi21:.Lsigma
	add	@x[0],@x[0],:lo12:.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	sub	sp,sp,#64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ldp	@d[6],@d[7],[$ctr]		// load counter
#ifdef	__AARCH64EB__
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif

.Loop_outer:
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	mov.32	@x[6],@d[3]
	lsr	@x[7],@d[3],#32
	mov.32	@x[8],@d[4]
	lsr	@x[9],@d[4],#32
	mov.32	@x[10],@d[5]
	lsr	@x[11],@d[5],#32
	mov.32	@x[12],@d[6]
	lsr	@x[13],@d[6],#32
	mov.32	@x[14],@d[7]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#10
	subs	$len,$len,#64
.Loop:
	sub	$ctr,$ctr,#1
___
	foreach (&ROUND(0, 4, 8,12)) { eval; }
	foreach (&ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	cbnz	$ctr,.Loop

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	add	@x[1],@x[1],@d[0],lsr#32
	add.32	@x[2],@x[2],@d[1]
	add	@x[3],@x[3],@d[1],lsr#32
	add.32	@x[4],@x[4],@d[2]
	add	@x[5],@x[5],@d[2],lsr#32
	add.32	@x[6],@x[6],@d[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	add	@x[15],@x[15],@d[7],lsr#32

	b.lo	.Ltail

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#1			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64

	b.hi	.Loop_outer

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	AARCH64_VALIDATE_LINK_REGISTER
	ret

.align	4
.Ltail:
	add	$len,$len,#64
.Less_than_64:
	sub	$out,$out,#1
	add	$inp,$inp,$len
	add	$out,$out,$len
	add	$ctr,sp,$len
	neg	$len,$len

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	stp	@x[0],@x[2],[sp,#0]
	stp	@x[4],@x[6],[sp,#16]
	stp	@x[8],@x[10],[sp,#32]
	stp	@x[12],@x[14],[sp,#48]

.Loop_tail:
	ldrb	w10,[$inp,$len]
	ldrb	w11,[$ctr,$len]
	add	$len,$len,#1
	eor	w10,w10,w11
	strb	w10,[$out,$len]
	cbnz	$len,.Loop_tail

	stp	xzr,xzr,[sp,#0]
	stp	xzr,xzr,[sp,#16]
	stp	xzr,xzr,[sp,#32]
	stp	xzr,xzr,[sp,#48]

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw
___
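# Partial final blocks are handled by .Ltail/.Less_than_64 above: the last
# keystream block is spilled to the stack, XORed into the output one byte at
# a time in .Loop_tail, and the stack copy is then wiped with zeros.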

{{{
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2,$T3) =
    map("v$_.4s",(0..7,16..23));
my (@K)=map("v$_.4s",(24..30));
my $ONE="v31.4s";

sub NEONROUND {
my $odd = pop;
my ($a,$b,$c,$d,$t)=@_;

	(
	"&add		('$a','$a','$b')",
	"&eor		('$d','$d','$a')",
	"&rev32_16	('$d','$d')",		# vrot ($d,16)

	"&add		('$c','$c','$d')",
	"&eor		('$t','$b','$c')",
	"&ushr		('$b','$t',20)",
	"&sli		('$b','$t',12)",

	"&add		('$a','$a','$b')",
	"&eor		('$t','$d','$a')",
	"&ushr		('$d','$t',24)",
	"&sli		('$d','$t',8)",

	"&add		('$c','$c','$d')",
	"&eor		('$t','$b','$c')",
	"&ushr		('$b','$t',25)",
	"&sli		('$b','$t',7)",

	"&ext		('$c','$c','$c',8)",
	"&ext		('$d','$d','$d',$odd?4:12)",
	"&ext		('$b','$b','$b',$odd?12:4)"
	);
}
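# NEON has no vector rotate, so the quarter-round rotations are synthesized:
# the 16-bit rotate uses rev32 on 16-bit lanes, while the 12-, 8- and 7-bit
# rotates each use an ushr/sli pair (logical shift right by 32-n into the
# destination, then shift-left-and-insert by n).  The trailing ext
# instructions rotate the b/c/d rows so that the odd (diagonal) round can
# reuse the same column code.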

$code.=<<___;

.globl	ChaCha20_ctr32_neon
.type	ChaCha20_ctr32_neon,%function
.align	5
ChaCha20_ctr32_neon:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adrp	@x[0],:pg_hi21:.Lsigma
	add	@x[0],@x[0],:lo12:.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	cmp	$len,#512
	b.hs	.L512_or_more_neon

	sub	sp,sp,#64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ld1	{@K[0]},[@x[0]],#16
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ld1	{@K[1],@K[2]},[$key]
	ldp	@d[6],@d[7],[$ctr]		// load counter
	ld1	{@K[3]},[$ctr]
	ld1	{$ONE},[@x[0]]
#ifdef	__AARCH64EB__
	rev64	@K[0],@K[0]
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif
	add	@K[3],@K[3],$ONE		// += 1
	add	@K[4],@K[3],$ONE
	add	@K[5],@K[4],$ONE
	shl	$ONE,$ONE,#2			// 1 -> 4

.Loop_outer_neon:
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	 mov	$A0,@K[0]
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	 mov	$A1,@K[0]
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	 mov	$A2,@K[0]
	mov.32	@x[6],@d[3]
	 mov	$B0,@K[1]
	lsr	@x[7],@d[3],#32
	 mov	$B1,@K[1]
	mov.32	@x[8],@d[4]
	 mov	$B2,@K[1]
	lsr	@x[9],@d[4],#32
	 mov	$D0,@K[3]
	mov.32	@x[10],@d[5]
	 mov	$D1,@K[4]
	lsr	@x[11],@d[5],#32
	 mov	$D2,@K[5]
	mov.32	@x[12],@d[6]
	 mov	$C0,@K[2]
	lsr	@x[13],@d[6],#32
	 mov	$C1,@K[2]
	mov.32	@x[14],@d[7]
	 mov	$C2,@K[2]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#10
	subs	$len,$len,#256
.Loop_neon:
	sub	$ctr,$ctr,#1
___
	my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
	my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
	my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
	my @thread3=&ROUND(0,4,8,12);

	foreach (@thread0) {
		eval;			eval(shift(@thread3));
		eval(shift(@thread1));	eval(shift(@thread3));
		eval(shift(@thread2));	eval(shift(@thread3));
	}

	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
	@thread3=&ROUND(0,5,10,15);

	foreach (@thread0) {
		eval;			eval(shift(@thread3));
		eval(shift(@thread1));	eval(shift(@thread3));
		eval(shift(@thread2));	eval(shift(@thread3));
	}
$code.=<<___;
	cbnz	$ctr,.Loop_neon

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	 add	$A0,$A0,@K[0]
	add	@x[1],@x[1],@d[0],lsr#32
	 add	$A1,$A1,@K[0]
	add.32	@x[2],@x[2],@d[1]
	 add	$A2,$A2,@K[0]
	add	@x[3],@x[3],@d[1],lsr#32
	 add	$C0,$C0,@K[2]
	add.32	@x[4],@x[4],@d[2]
	 add	$C1,$C1,@K[2]
	add	@x[5],@x[5],@d[2],lsr#32
	 add	$C2,$C2,@K[2]
	add.32	@x[6],@x[6],@d[3]
	 add	$D0,$D0,@K[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	 add	$D1,$D1,@K[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	 add	$D2,$D2,@K[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	 add	$B0,$B0,@K[1]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	 add	$B1,$B1,@K[1]
	add	@x[15],@x[15],@d[7],lsr#32
	 add	$B2,$B2,@K[1]

	b.lo	.Ltail_neon

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	ld1.8	{$T0-$T3},[$inp],#64
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	 eor	$A0,$A0,$T0
	eor	@x[10],@x[10],@x[11]
	 eor	$B0,$B0,$T1
	eor	@x[12],@x[12],@x[13]
	 eor	$C0,$C0,$T2
	eor	@x[14],@x[14],@x[15]
	 eor	$D0,$D0,$T3
	 ld1.8	{$T0-$T3},[$inp],#64

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#4			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	 add	@K[3],@K[3],$ONE		// += 4
	stp	@x[8],@x[10],[$out,#32]
	 add	@K[4],@K[4],$ONE
	stp	@x[12],@x[14],[$out,#48]
	 add	@K[5],@K[5],$ONE
	add	$out,$out,#64

	st1.8	{$A0-$D0},[$out],#64
	ld1.8	{$A0-$D0},[$inp],#64

	eor	$A1,$A1,$T0
	eor	$B1,$B1,$T1
	eor	$C1,$C1,$T2
	eor	$D1,$D1,$T3
	st1.8	{$A1-$D1},[$out],#64

	eor	$A2,$A2,$A0
	eor	$B2,$B2,$B0
	eor	$C2,$C2,$C0
	eor	$D2,$D2,$D0
	st1.8	{$A2-$D2},[$out],#64

	b.hi	.Loop_outer_neon

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	AARCH64_VALIDATE_LINK_REGISTER
	ret

.Ltail_neon:
	add	$len,$len,#256
	cmp	$len,#64
	b.lo	.Less_than_64

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#4			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64
	cmp	$len,#64
	b.lo	.Less_than_128

	ld1.8	{$T0-$T3},[$inp],#64
	eor	$A0,$A0,$T0
	eor	$B0,$B0,$T1
	eor	$C0,$C0,$T2
	eor	$D0,$D0,$T3
	st1.8	{$A0-$D0},[$out],#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64
	cmp	$len,#64
	b.lo	.Less_than_192

	ld1.8	{$T0-$T3},[$inp],#64
	eor	$A1,$A1,$T0
	eor	$B1,$B1,$T1
	eor	$C1,$C1,$T2
	eor	$D1,$D1,$T3
	st1.8	{$A1-$D1},[$out],#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64

	st1.8	{$A2-$D2},[sp]
	b	.Last_neon

.Less_than_128:
	st1.8	{$A0-$D0},[sp]
	b	.Last_neon
.Less_than_192:
	st1.8	{$A1-$D1},[sp]
	b	.Last_neon

.align	4
.Last_neon:
	sub	$out,$out,#1
	add	$inp,$inp,$len
	add	$out,$out,$len
	add	$ctr,sp,$len
	neg	$len,$len

.Loop_tail_neon:
	ldrb	w10,[$inp,$len]
	ldrb	w11,[$ctr,$len]
	add	$len,$len,#1
	eor	w10,w10,w11
	strb	w10,[$out,$len]
	cbnz	$len,.Loop_tail_neon

	stp	xzr,xzr,[sp,#0]
	stp	xzr,xzr,[sp,#16]
	stp	xzr,xzr,[sp,#32]
	stp	xzr,xzr,[sp,#48]

.Ldone_neon:
	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	ChaCha20_ctr32_neon,.-ChaCha20_ctr32_neon
___
{
my ($T0,$T1,$T2,$T3,$T4,$T5)=@K;
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,
    $A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(0..23));

$code.=<<___;
.type	ChaCha20_512_neon,%function
.align	5
ChaCha20_512_neon:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adrp	@x[0],:pg_hi21:.Lsigma
	add	@x[0],@x[0],:lo12:.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]

.L512_or_more_neon:
	sub	sp,sp,#128+64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ld1	{@K[0]},[@x[0]],#16
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ld1	{@K[1],@K[2]},[$key]
	ldp	@d[6],@d[7],[$ctr]		// load counter
	ld1	{@K[3]},[$ctr]
	ld1	{$ONE},[@x[0]]
#ifdef	__AARCH64EB__
	rev64	@K[0],@K[0]
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif
	add	@K[3],@K[3],$ONE		// += 1
	stp	@K[0],@K[1],[sp,#0]		// off-load key block, invariant part
	add	@K[3],@K[3],$ONE		// not typo
	str	@K[2],[sp,#32]
	add	@K[4],@K[3],$ONE
	add	@K[5],@K[4],$ONE
	add	@K[6],@K[5],$ONE
	shl	$ONE,$ONE,#2			// 1 -> 4

	stp	d8,d9,[sp,#128+0]		// meet ABI requirements
	stp	d10,d11,[sp,#128+16]
	stp	d12,d13,[sp,#128+32]
	stp	d14,d15,[sp,#128+48]

	sub	$len,$len,#512			// not typo

.Loop_outer_512_neon:
	 mov	$A0,@K[0]
	 mov	$A1,@K[0]
	 mov	$A2,@K[0]
	 mov	$A3,@K[0]
	 mov	$A4,@K[0]
	 mov	$A5,@K[0]
	 mov	$B0,@K[1]
	mov.32	@x[0],@d[0]			// unpack key block
	 mov	$B1,@K[1]
	lsr	@x[1],@d[0],#32
	 mov	$B2,@K[1]
	mov.32	@x[2],@d[1]
	 mov	$B3,@K[1]
	lsr	@x[3],@d[1],#32
	 mov	$B4,@K[1]
	mov.32	@x[4],@d[2]
	 mov	$B5,@K[1]
	lsr	@x[5],@d[2],#32
	 mov	$D0,@K[3]
	mov.32	@x[6],@d[3]
	 mov	$D1,@K[4]
	lsr	@x[7],@d[3],#32
	 mov	$D2,@K[5]
	mov.32	@x[8],@d[4]
	 mov	$D3,@K[6]
	lsr	@x[9],@d[4],#32
	 mov	$C0,@K[2]
	mov.32	@x[10],@d[5]
	 mov	$C1,@K[2]
	lsr	@x[11],@d[5],#32
	 add	$D4,$D0,$ONE			// +4
	mov.32	@x[12],@d[6]
	 add	$D5,$D1,$ONE			// +4
	lsr	@x[13],@d[6],#32
	 mov	$C2,@K[2]
	mov.32	@x[14],@d[7]
	 mov	$C3,@K[2]
	lsr	@x[15],@d[7],#32
	 mov	$C4,@K[2]
	 stp	@K[3],@K[4],[sp,#48]		// off-load key block, variable part
	 mov	$C5,@K[2]
	 str	@K[5],[sp,#80]

	mov	$ctr,#5
	subs	$len,$len,#512
.Loop_upper_neon:
	sub	$ctr,$ctr,#1
___
	my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
	my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
	my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
	my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
	my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
	my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
	my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
	my $diff = ($#thread0+1)*6 - $#thread67 - 1;
	my $i = 0;

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}

	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}
$code.=<<___;
	cbnz	$ctr,.Loop_upper_neon

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	add	@x[1],@x[1],@d[0],lsr#32
	add.32	@x[2],@x[2],@d[1]
	add	@x[3],@x[3],@d[1],lsr#32
	add.32	@x[4],@x[4],@d[2]
	add	@x[5],@x[5],@d[2],lsr#32
	add.32	@x[6],@x[6],@d[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	add	@x[15],@x[15],@d[7],lsr#32

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	 stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#1			// increment counter
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	 stp	@x[4],@x[6],[$out,#16]
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	 stp	@x[8],@x[10],[$out,#32]
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	 stp	@x[12],@x[14],[$out,#48]
	 add	$out,$out,#64
	mov.32	@x[6],@d[3]
	lsr	@x[7],@d[3],#32
	mov.32	@x[8],@d[4]
	lsr	@x[9],@d[4],#32
	mov.32	@x[10],@d[5]
	lsr	@x[11],@d[5],#32
	mov.32	@x[12],@d[6]
	lsr	@x[13],@d[6],#32
	mov.32	@x[14],@d[7]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#5
.Loop_lower_neon:
	sub	$ctr,$ctr,#1
___
	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}

	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}
$code.=<<___;
	cbnz	$ctr,.Loop_lower_neon

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	 ldp	@K[0],@K[1],[sp,#0]
	add	@x[1],@x[1],@d[0],lsr#32
	 ldp	@K[2],@K[3],[sp,#32]
	add.32	@x[2],@x[2],@d[1]
	 ldp	@K[4],@K[5],[sp,#64]
	add	@x[3],@x[3],@d[1],lsr#32
	 add	$A0,$A0,@K[0]
	add.32	@x[4],@x[4],@d[2]
	 add	$A1,$A1,@K[0]
	add	@x[5],@x[5],@d[2],lsr#32
	 add	$A2,$A2,@K[0]
	add.32	@x[6],@x[6],@d[3]
	 add	$A3,$A3,@K[0]
	add	@x[7],@x[7],@d[3],lsr#32
	 add	$A4,$A4,@K[0]
	add.32	@x[8],@x[8],@d[4]
	 add	$A5,$A5,@K[0]
	add	@x[9],@x[9],@d[4],lsr#32
	 add	$C0,$C0,@K[2]
	add.32	@x[10],@x[10],@d[5]
	 add	$C1,$C1,@K[2]
	add	@x[11],@x[11],@d[5],lsr#32
	 add	$C2,$C2,@K[2]
	add.32	@x[12],@x[12],@d[6]
	 add	$C3,$C3,@K[2]
	add	@x[13],@x[13],@d[6],lsr#32
	 add	$C4,$C4,@K[2]
	add.32	@x[14],@x[14],@d[7]
	 add	$C5,$C5,@K[2]
	add	@x[15],@x[15],@d[7],lsr#32
	 add	$D4,$D4,$ONE			// +4
	add	@x[0],@x[0],@x[1],lsl#32	// pack
	 add	$D5,$D5,$ONE			// +4
	add	@x[2],@x[2],@x[3],lsl#32
	 add	$D0,$D0,@K[3]
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	 add	$D1,$D1,@K[4]
	add	@x[4],@x[4],@x[5],lsl#32
	 add	$D2,$D2,@K[5]
	add	@x[6],@x[6],@x[7],lsl#32
	 add	$D3,$D3,@K[6]
	ldp	@x[5],@x[7],[$inp,#16]
	 add	$D4,$D4,@K[3]
	add	@x[8],@x[8],@x[9],lsl#32
	 add	$D5,$D5,@K[4]
	add	@x[10],@x[10],@x[11],lsl#32
	 add	$B0,$B0,@K[1]
	ldp	@x[9],@x[11],[$inp,#32]
	 add	$B1,$B1,@K[1]
	add	@x[12],@x[12],@x[13],lsl#32
	 add	$B2,$B2,@K[1]
	add	@x[14],@x[14],@x[15],lsl#32
	 add	$B3,$B3,@K[1]
	ldp	@x[13],@x[15],[$inp,#48]
	 add	$B4,$B4,@K[1]
	add	$inp,$inp,#64
	 add	$B5,$B5,@K[1]

#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	ld1.8	{$T0-$T3},[$inp],#64
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	 eor	$A0,$A0,$T0
	eor	@x[10],@x[10],@x[11]
	 eor	$B0,$B0,$T1
	eor	@x[12],@x[12],@x[13]
	 eor	$C0,$C0,$T2
	eor	@x[14],@x[14],@x[15]
	 eor	$D0,$D0,$T3
	 ld1.8	{$T0-$T3},[$inp],#64

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#7			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64
	st1.8	{$A0-$D0},[$out],#64

	ld1.8	{$A0-$D0},[$inp],#64
	eor	$A1,$A1,$T0
	eor	$B1,$B1,$T1
	eor	$C1,$C1,$T2
	eor	$D1,$D1,$T3
	st1.8	{$A1-$D1},[$out],#64

	ld1.8	{$A1-$D1},[$inp],#64
	eor	$A2,$A2,$A0
	 ldp	@K[0],@K[1],[sp,#0]
	eor	$B2,$B2,$B0
	 ldp	@K[2],@K[3],[sp,#32]
	eor	$C2,$C2,$C0
	eor	$D2,$D2,$D0
	st1.8	{$A2-$D2},[$out],#64

	ld1.8	{$A2-$D2},[$inp],#64
	eor	$A3,$A3,$A1
	eor	$B3,$B3,$B1
	eor	$C3,$C3,$C1
	eor	$D3,$D3,$D1
	st1.8	{$A3-$D3},[$out],#64

	ld1.8	{$A3-$D3},[$inp],#64
	eor	$A4,$A4,$A2
	eor	$B4,$B4,$B2
	eor	$C4,$C4,$C2
	eor	$D4,$D4,$D2
	st1.8	{$A4-$D4},[$out],#64

	shl	$A0,$ONE,#1			// 4 -> 8
	eor	$A5,$A5,$A3
	eor	$B5,$B5,$B3
	eor	$C5,$C5,$C3
	eor	$D5,$D5,$D3
	st1.8	{$A5-$D5},[$out],#64

	add	@K[3],@K[3],$A0			// += 8
	add	@K[4],@K[4],$A0
	add	@K[5],@K[5],$A0
	add	@K[6],@K[6],$A0

	b.hs	.Loop_outer_512_neon

	adds	$len,$len,#512
	ushr	$A0,$ONE,#2			// 4 -> 1

	ldp	d8,d9,[sp,#128+0]		// meet ABI requirements
	ldp	d10,d11,[sp,#128+16]
	ldp	d12,d13,[sp,#128+32]
	ldp	d14,d15,[sp,#128+48]

	stp	@K[0],$ONE,[sp,#0]		// wipe off-load area
	stp	@K[0],$ONE,[sp,#32]
	stp	@K[0],$ONE,[sp,#64]

	b.eq	.Ldone_512_neon

	cmp	$len,#192
	sub	@K[3],@K[3],$A0			// -= 1
	sub	@K[4],@K[4],$A0
	sub	@K[5],@K[5],$A0
	add	sp,sp,#128
	b.hs	.Loop_outer_neon

	eor	@K[1],@K[1],@K[1]
	eor	@K[2],@K[2],@K[2]
	eor	@K[3],@K[3],@K[3]
	eor	@K[4],@K[4],@K[4]
	eor	@K[5],@K[5],@K[5]
	eor	@K[6],@K[6],@K[6]
	b	.Loop_outer

.Ldone_512_neon:
	ldp	x19,x20,[x29,#16]
	add	sp,sp,#128+64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	ChaCha20_512_neon,.-ChaCha20_512_neon
___
}
}}}

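# The substitutions below translate the pseudo-syntax used above into real
# AArch64 assembly: ".32"-suffixed scalar ops drop the suffix and switch x
# registers to their w aliases, eor/ext/mov on ".4s" vectors become ".16b"
# (bitwise ops are lane-size agnostic), "ld1.8"/"st1.8" become plain ld1/st1
# on ".16b", ldr/str/ldp/stp of "v" registers use the "q" alias, and
# "rev32.16" becomes rev32 on ".8h" half-word lanes.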
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	(s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1))	or
	(m/\b(eor|ext|mov)\b/ and (s/\.4s/\.16b/g or 1))	or
	(s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1))	or
	(m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1))	or
	(s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1));

	#s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;

	print $_,"\n";
}
close STDOUT or die "error closing STDOUT: $!";	# flush