#! /usr/bin/env perl
# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler-generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes the number of writes.
#
# May 2012.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium, ~40% on Atom. As the fully unrolled loop body is
# almost 15x larger, 8KB vs. 560B, it's used only for longer inputs.
# But not on P4, where it kills performance, nor on Sandy Bridge,
# where the folded loop is approximately as fast...
#
# June 2012.
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
#
# May 2013.
#
# Replace AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# March 2014.
#
# Add support for Intel SHA Extensions.
#
# Performance in clock cycles per processed byte (less is better):
#
#		gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)
# Pentium	46	57	40/38		-	-
# PIII		36	33	27/24		-	-
# P4		41	38	28		-	17.3
# AMD K8	27	25	19/15.5		-	14.9
# Core2		26	23	18/15.6		14.3	13.8
# Westmere	27	-	19/15.7		13.4	12.3
# Sandy Bridge	25	-	15.9		12.4	11.6
# Ivy Bridge	24	-	15.0		11.4	10.3
# Haswell	22	-	13.9		9.46	7.80
# Skylake	20	-	14.9		9.50	7.70
# Bulldozer	36	-	27/22		17.0	13.6
# VIA Nano	36	-	25/22		16.8	16.5
# Atom		50	-	30/25		21.9	18.9
# Silvermont	40	-	34/31		22.9	20.6
# Goldmont	29	-	20		16.3(***)
#
# (*)	numbers after slash are for unrolled loop, where applicable;
# (**)	x86_64 assembly performance is presented for reference
#	purposes, results are best-available;
# (***)	SHAEXT result is 4.1, strangely enough better than the 64-bit one;
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../../perlasm");
require "x86asm.pl";

$output=pop;
open STDOUT,">$output";

&asm_init($ARGV[0]);
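# Typical invocation (inferred from the argument handling above, not a
# documented interface): the last argument is the output path and the
# first selects the perlasm flavour, e.g.
#
#	perl sha256-586.pl elf sha256-586.S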

$xmm = 1;

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
#
# TODO(davidben): Enable AVX+BMI2 code after testing by setting $avx to 2.
$avx = 1;

$avx = 0 unless ($xmm);

$shaext=$xmm;	### set to zero if compiling for 1.0.1

# TODO(davidben): Consider enabling the Intel SHA Extensions code once it's
# been tested.
$shaext = 0;

$unroll_after = 64*4;	# If pre-evicted from the L1P cache, the first
			# spin of the fully unrolled loop was measured to
			# run about 3-4x slower. If the slowdown
			# coefficient is N and the unrolled loop is m
			# times faster, you break even at (N-1)/(m-1)
			# blocks. That then needs to be scaled by the
			# probability of the code being evicted,
			# code size/cache size=1/4. Typical m is 1.15...
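# A quick sanity check of the break-even arithmetic above (illustrative
# only; N and m are assumed values, and nothing below runs as part of
# code generation):
#
#	my ($N,$m) = (3.5,1.15);
#	my $breakeven = ($N-1)/($m-1);		# ~16.7 blocks
#	my $expected  = $breakeven/4;		# ~4.2 blocks ~ 64*4 bytes
#
# which is roughly where $unroll_after is set.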

$A="eax";
$E="edx";
$T="ebx";
$Aoff=&DWP(4,"esp");
$Boff=&DWP(8,"esp");
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");
$K256="ebp";

sub BODY_16_63() {
	&mov	($T,"ecx");			# "ecx" is preloaded
	 &mov	("esi",&DWP(4*(9+15+16-14),"esp"));
	&ror	("ecx",18-7);
	 &mov	("edi","esi");
	&ror	("esi",19-17);
	 &xor	("ecx",$T);
	 &shr	($T,3);
	&ror	("ecx",7);
	 &xor	("esi","edi");
	 &xor	($T,"ecx");			# T = sigma0(X[-15])
	&ror	("esi",17);
	 &add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
	&shr	("edi",10);
	 &add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
	#&xor	("edi","esi")			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

	&BODY_00_15(1);
}
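# For reference, the scalar message-schedule functions that BODY_16_63
# interleaves, written out in plain Perl. This is a sketch for readers:
# the subs below are never called by the generator, and the _ref names
# are ours, not perlasm's.
sub _ror32 { my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n)))&0xffffffff; }
sub _sigma0_ref { my $x=shift; _ror32($x,7)^_ror32($x,18)^($x>>3); }
sub _sigma1_ref { my $x=shift; _ror32($x,17)^_ror32($x,19)^($x>>10); }
# X[0] = _sigma1_ref(X[-2]) + X[-7] + _sigma0_ref(X[-15]) + X[-16]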
sub BODY_00_15() {
    my $in_16_63=shift;

	&mov	("ecx",$E);
	 &xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
	 &mov	("esi",$Foff);
	&ror	("ecx",25-11);
	 &add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
	 &mov	("edi",$Goff);
	&xor	("ecx",$E);
	 &xor	("esi","edi");
	 &mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
	 &mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
	&ror	("ecx",11-6);
	 &and	("esi",$E);
	 &mov	($Eoff,$E);		# modulo-scheduled
	&xor	($E,"ecx");
	 &add	($T,$Hoff);		# T += h
	 &xor	("esi","edi");		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	 &mov	("ecx",$A);
	 &add	($T,"esi");		# T += Ch(e,f,g)

	&ror	("ecx",22-13);
	 &add	($T,$E);		# T += Sigma1(e)
	 &mov	("edi",$Boff);
	&xor	("ecx",$A);
	 &mov	($Aoff,$A);		# modulo-scheduled
	 &lea	("esp",&DWP(-4,"esp"));
	&ror	("ecx",13-2);
	 &mov	("esi",&DWP(0,$K256));
	&xor	("ecx",$A);
	 &mov	($E,$Eoff);		# e in next iteration, d in this one
	 &xor	($A,"edi");		# a ^= b
	&ror	("ecx",2);		# Sigma0(a)

	 &add	($T,"esi");		# T += K[i]
	 &mov	(&DWP(0,"esp"),$A);	# (b^c) in next round
	&add	($E,$T);		# d += T
	 &and	($A,&DWP(4,"esp"));	# a &= (b^c)
	&add	($T,"ecx");		# T += Sigma0(a)
	 &xor	($A,"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	 &mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
	&add	($K256,4);
	 &add	($A,$T);		# h += T
}
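# The "alternative Maj" above keeps (b^c) live across rounds and uses the
# identity Maj(a,b,c) = Ch(a^b,c,b) = ((a^b)&(b^c))^b. A plain-Perl sketch
# (never called; the _ref names are illustrative, not part of perlasm):
sub _ch_ref  { my ($x,$y,$z)=@_; (($x&$y)^(~$x&$z))&0xffffffff; }
sub _maj_ref { my ($a,$b,$c)=@_; _ch_ref($a^$b,$c,$b); }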

&static_label("K256");

&function_begin("sha256_block_data_order_nohw");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
						if (!$i386 && $xmm) {
						if ($unroll_after) {
&set_label("no_xmm");	# vestigial label; nothing jumps here since the
			# CPU dispatch was removed
	&sub	("eax","edi");		# number of input bytes
	&cmp	("eax",$unroll_after);
	&jae	(&label("unrolled"));
						} }
	&jmp	(&label("loop"));

sub COMPACT_LOOP() {
my $suffix=shift;

&set_label("loop$suffix",$suffix?32:16);
    # copy input block to stack reversing byte and dword order
    for($i=0;$i<4;$i++) {
	&mov	("eax",&DWP($i*16+0,"edi"));
	&mov	("ebx",&DWP($i*16+4,"edi"));
	&mov	("ecx",&DWP($i*16+8,"edi"));
	&bswap	("eax");
	&mov	("edx",&DWP($i*16+12,"edi"));
	&bswap	("ebx");
	&push	("eax");
	&bswap	("ecx");
	&push	("ebx");
	&bswap	("edx");
	&push	("ecx");
	&push	("edx");
    }
	&add	("edi",64);
	&lea	("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($A,&DWP(0,"esi"));
	&mov	("ebx",&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	# &mov	($Aoff,$A);
	&mov	($Boff,"ebx");
	&xor	("ebx","ecx");
	&mov	($Coff,"ecx");
	&mov	($Doff,"edi");
	&mov	(&DWP(0,"esp"),"ebx");	# magic (b^c for the Maj trick)
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("edi",&DWP(28,"esi"));
	# &mov	($Eoff,$E);
	&mov	($Foff,"ebx");
	&mov	($Goff,"ecx");
	&mov	($Hoff,"edi");

&set_label("00_15$suffix",16);

	&BODY_00_15();

	&cmp	("esi",0xc19bf174);	# K[15]: rounds 0..15 done
	&jne	(&label("00_15$suffix"));

	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
	&jmp	(&label("16_63$suffix"));

&set_label("16_63$suffix",16);

	&BODY_16_63();

	&cmp	("esi",0xc67178f2);	# K[63]: all 64 rounds done
	&jne	(&label("16_63$suffix"));

	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
	# &mov	($A,$Aoff);
	&mov	("ebx",$Boff);
	# &mov	("edi",$Coff);
	&mov	("ecx",$Doff);
	&add	($A,&DWP(0,"esi"));
	&add	("ebx",&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$A);
	&mov	(&DWP(4,"esi"),"ebx");
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	# &mov	($E,$Eoff);
	&mov	("eax",$Foff);
	&mov	("ebx",$Goff);
	&mov	("ecx",$Hoff);
	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
	&add	($E,&DWP(16,"esi"));
	&add	("eax",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"eax");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");

	&lea	("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
	&sub	($K256,4*64);			# rewind K

	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
	&jb	(&label("loop$suffix"));
}
	&COMPACT_LOOP();
	&mov	("esp",&DWP(12,"esp"));		# restore sp
&function_end_A();
						if (!$i386 && !$xmm) {
	# ~20% improvement on Sandy Bridge
	local *ror = sub { &shrd(@_[0],@_) };
	&COMPACT_LOOP("_shrd");
	&mov	("esp",&DWP(12,"esp"));		# restore sp
&function_end_A();
						}
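# Note on the override above: &shrd($r,$r,$n), a double-precision shift of
# a register with itself, computes the same value as &ror($r,$n). Shifting
# the 64-bit concatenation $r:$r right by $n and keeping the low 32 bits
# is exactly a 32-bit rotate right; swapping the encoding is what yields
# the ~20% improvement noted for Sandy Bridge.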

&set_label("K256",64);	# Yes! I keep it in the code segment!
@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
&data_word(@K256);
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
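# Provenance note: @K256 is the standard FIPS 180-4 table, the first 32
# fractional bits of the cube roots of the first 64 primes. A sketch that
# illustrates this (never called; double-precision cube roots can be off
# by one in the last bit for some entries, so this documents provenance
# rather than regenerating the table):
sub _k256_ref {
    my @p;
    for (my $n=2; @p<64; $n++) {
	push @p,$n unless grep { $n%$_==0 } @p;	# trial division by smaller primes
    }
    return map { my $c=$_**(1/3); int(($c-int($c))*2**32) } @p;
}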

($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);	# offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }	# rotates with $i, renaming a..h on the stack

if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("ebx",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"ebx");
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"ebx");
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&jmp	(&label("grand_loop"));

&set_label("grand_loop",16);
    # copy input block to stack reversing byte order
    for($i=0;$i<5;$i++) {
	&mov	("ebx",&DWP(12*$i+0,"edi"));
	&mov	("ecx",&DWP(12*$i+4,"edi"));
	&bswap	("ebx");
	&mov	("esi",&DWP(12*$i+8,"edi"));
	&bswap	("ecx");
	&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
	&bswap	("esi");
	&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
	&mov	(&DWP(32+12*$i+8,"esp"),"esi");
    }
	&mov	("ebx",&DWP($i*12,"edi"));
	&add	("edi",64);
	&bswap	("ebx");
	&mov	(&DWP(96+4,"esp"),"edi");
	&mov	(&DWP(32+12*$i,"esp"),"ebx");

    my ($t1,$t2) = ("ecx","esi");

    for ($i=0;$i<64;$i++) {

      if ($i>=16) {
	&mov	($T,$t1);			# $t1 is preloaded
	# &mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));
	&ror	($t1,18-7);
	 &mov	("edi",$t2);
	&ror	($t2,19-17);
	 &xor	($t1,$T);
	 &shr	($T,3);
	&ror	($t1,7);
	 &xor	($t2,"edi");
	 &xor	($T,$t1);			# T = sigma0(X[-15])
	&ror	($t2,17);
	 &add	($T,&DWP(32+4*($i&15),"esp"));	# T += X[-16]
	&shr	("edi",10);
	 &add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
	#&xor	("edi",$t2)			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
      }
	&mov	($t1,$E);
	 &xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
	 &mov	($t2,&off($f));
	&ror	($E,25-11);
	 &add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
	 &mov	("edi",&off($g));
	&xor	($E,$t1);
	 &mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
	 &mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]
	 &xor	($t2,"edi");
	&ror	($E,11-6);
	 &and	($t2,$t1);
	 &mov	(&off($e),$t1);		# save $E, modulo-scheduled
	&xor	($E,$t1);
	 &add	($T,&off($h));		# T += h
	 &xor	("edi",$t2);		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	 &mov	($t1,$AH[0]);
	 &add	($T,"edi");		# T += Ch(e,f,g)

	&ror	($t1,22-13);
	 &mov	($t2,$AH[0]);
	 &mov	("edi",&off($b));
	&xor	($t1,$AH[0]);
	 &mov	(&off($a),$AH[0]);	# save $A, modulo-scheduled
	 &xor	($AH[0],"edi");		# a ^= b, (b^c) in next round
	&ror	($t1,13-2);
	 &and	($AH[1],$AH[0]);	# (b^c) &= (a^b)
	 &lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
	&xor	($t1,$t2);
	 &xor	($AH[1],"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	 &mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
	&ror	($t1,2);		# Sigma0(a)

	 &add	($AH[1],$E);		# h += T
	 &add	($E,&off($d));		# d += T
	&add	($AH[1],$t1);		# h += Sigma0(a)
	 &mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);

	@AH = reverse(@AH);		# rotate(a,h)
	($t1,$t2) = ($t2,$t1);		# rotate(t1,t2)
    }
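# Note on the two "rotate" lines above: @AH has only two elements, so
# reverse() swaps which register plays a and which plays h on each round,
# while the remaining six state words live on the stack and are renamed
# by the $i-relative offsets that sub off() computes.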
	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");		# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ebx",&DWP(24,"esp"));
	&mov	("ecx",&DWP(28,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	 &mov	(&DWP(24,"esp"),"ebx");
	 &mov	(&DWP(28,"esp"),"ecx");

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_loop"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
}
&function_end_B("sha256_block_data_order_nohw");

						if (!$i386 && $xmm) {{{
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of the SHA256 update function.
#
my ($ctx,$inp,$end)=("esi","edi","eax");
my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
my @MSG=map("xmm$_",(3..6));

sub sha256op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha256rnds2	{ sha256op38(0xcb,@_); }
sub sha256msg1	{ sha256op38(0xcc,@_); }
sub sha256msg2	{ sha256op38(0xcd,@_); }
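# Example of what the hand-assembler above emits (raw bytes, presumably
# because these instructions predate widespread assembler support):
# sha256rnds2($CDGH,$ABEF), with $CDGH=xmm2 and $ABEF=xmm1, encodes as
# opcode 0f 38 cb plus ModR/M 0xc0|(2<<3)|1 = 0xd1, i.e.
# &data_byte(0x0f,0x38,0xcb,0xd1). Note sha256rnds2 also reads xmm0
# implicitly, which is why $Wi is pinned to xmm0 above.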

&function_begin("sha256_block_data_order_hw");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp

	# TODO(davidben): The preamble above this point comes from the original
	# merged sha256_block_data_order function, which performed some common
	# setup and then jumped to the particular SHA-256 implementation. The
	# parts of the preamble that do not apply to this function can be
	# removed.

	&sub		("esp",32);

	&movdqu		($ABEF,&QWP(0,$ctx));		# DCBA
	&lea		($K256,&DWP(0x80,$K256));
	&movdqu		($CDGH,&QWP(16,$ctx));		# HGFE
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask

	&pshufd		($Wi,$ABEF,0x1b);		# ABCD
	&pshufd		($ABEF,$ABEF,0xb1);		# CDAB
	&pshufd		($CDGH,$CDGH,0x1b);		# EFGH
	&palignr	($ABEF,$CDGH,8);		# ABEF
	&punpcklqdq	($CDGH,$Wi);			# CDGH
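# The shuffles above repack the state from the DCBA/HGFE dword order the
# words have in little-endian memory into the (A,B,E,F)/(C,D,G,H) pairing
# that sha256rnds2 operates on; the inverse repacking happens after the
# loop, just before the state is stored back.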
	&jmp		(&label("loop_shaext"));

&set_label("loop_shaext",16);
	&movdqu		(@MSG[0],&QWP(0,$inp));
	&movdqu		(@MSG[1],&QWP(0x10,$inp));
	&movdqu		(@MSG[2],&QWP(0x20,$inp));
	&pshufb		(@MSG[0],$TMP);
	&movdqu		(@MSG[3],&QWP(0x30,$inp));
	&movdqa		(&QWP(16,"esp"),$CDGH);		# offload

	&movdqa		($Wi,&QWP(0*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&pshufb		(@MSG[1],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 0-3
	&pshufd		($Wi,$Wi,0x0e);
	&nop		();
	&movdqa		(&QWP(0,"esp"),$ABEF);		# offload
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(1*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&pshufb		(@MSG[2],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 4-7
	&pshufd		($Wi,$Wi,0x0e);
	&lea		($inp,&DWP(0x40,$inp));
	&sha256msg1	(@MSG[0],@MSG[1]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(2*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);
	&pshufb		(@MSG[3],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 8-11
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[3]);
	&palignr	($TMP,@MSG[2],4);
	&nop		();
	&paddd		(@MSG[0],$TMP);
	&sha256msg1	(@MSG[1],@MSG[2]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(3*16-0x80,$K256));
	&paddd		($Wi,@MSG[3]);
	&sha256msg2	(@MSG[0],@MSG[3]);
	&sha256rnds2	($CDGH,$ABEF);			# 12-15
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[0]);
	&palignr	($TMP,@MSG[3],4);
	&nop		();
	&paddd		(@MSG[1],$TMP);
	&sha256msg1	(@MSG[2],@MSG[3]);
	&sha256rnds2	($ABEF,$CDGH);

for($i=4;$i<16-3;$i++) {
	&movdqa		($Wi,&QWP($i*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);			# 16-19...
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&nop		();
	&paddd		(@MSG[2],$TMP);
	&sha256msg1	(@MSG[3],@MSG[0]);
	&sha256rnds2	($ABEF,$CDGH);

	push(@MSG,shift(@MSG));
}
	&movdqa		($Wi,&QWP(13*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);			# 52-55
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&sha256rnds2	($ABEF,$CDGH);
	&paddd		(@MSG[2],$TMP);

	&movdqa		($Wi,&QWP(14*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&sha256rnds2	($CDGH,$ABEF);			# 56-59
	&pshufd		($Wi,$Wi,0x0e);
	&sha256msg2	(@MSG[2],@MSG[1]);
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(15*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);
	&nop		();
	&sha256rnds2	($CDGH,$ABEF);			# 60-63
	&pshufd		($Wi,$Wi,0x0e);
	&cmp		($end,$inp);
	&nop		();
	&sha256rnds2	($ABEF,$CDGH);

	&paddd		($CDGH,&QWP(16,"esp"));
	&paddd		($ABEF,&QWP(0,"esp"));
	&jnz		(&label("loop_shaext"));

	&pshufd		($CDGH,$CDGH,0xb1);		# DCHG
	&pshufd		($TMP,$ABEF,0x1b);		# FEBA
	&pshufd		($ABEF,$ABEF,0xb1);		# BAFE
	&punpckhqdq	($ABEF,$CDGH);			# DCBA
	&palignr	($CDGH,$TMP,8);			# HGFE

	&mov		("esp",&DWP(32+12,"esp"));
	&movdqu		(&QWP(0,$ctx),$ABEF);
	&movdqu		(&QWP(16,$ctx),$CDGH);
&function_end("sha256_block_data_order_hw");
}

my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
my @AH = ($A,$T);

&function_begin("sha256_block_data_order_ssse3");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp

	# TODO(davidben): The preamble above this point comes from the original
	# merged sha256_block_data_order function, which performed some common
	# setup and then jumped to the particular SHA-256 implementation. The
	# parts of the preamble that do not apply to this function can be
	# removed.

	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&movdqa	($t3,&QWP(256,$K256));		# byte swap mask
	&jmp	(&label("grand_ssse3"));

&set_label("grand_ssse3",16);
	# load input, reverse byte order, add K256[0..15], save to stack
	&movdqu	(@X[0],&QWP(0,"edi"));
	&movdqu	(@X[1],&QWP(16,"edi"));
	&movdqu	(@X[2],&QWP(32,"edi"));
	&movdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&pshufb	(@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&pshufb	(@X[1],$t3);
	&movdqa	($t0,&QWP(0,$K256));
	&pshufb	(@X[2],$t3);
	&movdqa	($t1,&QWP(16,$K256));
	&paddd	($t0,@X[0]);
	&pshufb	(@X[3],$t3);
	&movdqa	($t2,&QWP(32,$K256));
	&paddd	($t1,@X[1]);
	&movdqa	($t3,&QWP(48,$K256));
	&movdqa	(&QWP(32+0,"esp"),$t0);
	&paddd	($t2,@X[2]);
	&movdqa	(&QWP(32+16,"esp"),$t1);
	&paddd	($t3,@X[3]);
	&movdqa	(&QWP(32+32,"esp"),$t2);
	&movdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("ssse3_00_47"));

&set_label("ssse3_00_47",16);
	&add		($K256,64);

sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions

	  eval(shift(@insns));
	&movdqa		($t0,@X[1]);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa		($t3,@X[3]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&palignr	($t0,@X[0],4);		# X[1..4]
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &palignr	($t3,@X[2],4);		# X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&movdqa		($t1,$t0);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa		($t2,$t0);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t0,3);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t2,7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld		($t1,32-18);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t2,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t1);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld		($t1,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &movdqa	($t2,$t3);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t1);		# sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrld		($t3,10);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pshufd	($t3,$t3,0b10000000);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &psrldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &pshufd	($t3,@X[0],0b01010000);	# X[16..17]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &movdqa	($t2,$t3);
	  eval(shift(@insns));			# @
	 &psrld		($t3,10);
	  eval(shift(@insns));
	 &psrlq		($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pshufd	($t3,$t3,0b00001000);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&movdqa		($t2,&QWP(16*$j,$K256));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pslldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd		($t2,@X[0]);
	  eval(shift(@insns));			# @

	foreach (@insns) { eval; }		# remaining instructions

	&movdqa		(&QWP(32+16*$j,"esp"),$t2);
}

sub body_00_15 () {
	(
	'&mov	("ecx",$E);',
	'&ror	($E,25-11);',
	 '&mov	("esi",&off($f));',
	'&xor	($E,"ecx");',
	 '&mov	("edi",&off($g));',
	 '&xor	("esi","edi");',
	'&ror	($E,11-6);',
	 '&and	("esi","ecx");',
	 '&mov	(&off($e),"ecx");',	# save $E, modulo-scheduled
	'&xor	($E,"ecx");',
	 '&xor	("edi","esi");',	# Ch(e,f,g)
	'&ror	($E,6);',		# T = Sigma1(e)
	 '&mov	("ecx",$AH[0]);',
	 '&add	($E,"edi");',		# T += Ch(e,f,g)
	 '&mov	("edi",&off($b));',
	'&mov	("esi",$AH[0]);',

	'&ror	("ecx",22-13);',
	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&xor	("ecx",$AH[0]);',
	 '&xor	($AH[0],"edi");',	# a ^= b, (b^c) in next round
	 '&add	($E,&off($h));',	# T += h
	'&ror	("ecx",13-2);',
	 '&and	($AH[1],$AH[0]);',	# (b^c) &= (a^b)
	'&xor	("ecx","esi");',
	 '&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
	 '&xor	($AH[1],"edi");',	# h = Maj(a,b,c) = Ch(a^b,c,b)
	'&ror	("ecx",2);',		# Sigma0(a)

	 '&add	($AH[1],$E);',		# h += T
	 '&add	($E,&off($d));',	# d += T
	'&add	($AH[1],"ecx");'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
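# How the interleaving works: each body_00_15() call returns one round of
# the scalar schedule as a list of instruction strings, so @insns above
# collects four rounds (~120 instructions), which SSSE3_00_47 then eval()s
# between its SIMD message-schedule steps, keeping the integer and vector
# units busy at the same time.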

    for ($i=0,$j=0; $j<4; $j++) {
	&SSSE3_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);	# byte swap mask marks end of K256
	&jne	(&label("ssse3_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&movdqa	($t3,&QWP(64,$K256));		# byte swap mask
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_ssse3"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end("sha256_block_data_order_ssse3");

						if ($avx) {
&function_begin("sha256_block_data_order_avx");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp

	# TODO(davidben): The preamble above this point comes from the original
	# merged sha256_block_data_order function, which performed some common
	# setup and then jumped to the particular SHA-256 implementation. The
	# parts of the preamble that do not apply to this function can be
	# removed.

	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));	# byte swap mask
	&jmp	(&label("grand_avx"));

&set_label("grand_avx",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp		(&label("avx_00_47"));

&set_label("avx_00_47",16);
	&add		($K256,64);

sub Xupdate_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
	 '&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
	'&vpsrld	($t2,$t0,7);',
	 '&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,3);',
	'&vpslld	($t1,$t0,14);',
	'&vpxor		($t0,$t3,$t2);',
	 '&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,18-7);',
	'&vpxor		($t0,$t0,$t1);',
	'&vpslld	($t1,$t1,25-14);',
	'&vpxor		($t0,$t0,$t2);',
	 '&vpsrld	($t2,$t3,10);',
	'&vpxor		($t0,$t0,$t1);',	# sigma0(X[1..4])
	 '&vpsrlq	($t1,$t3,17);',
	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
	 '&vpxor	($t2,$t2,$t1);',
	 '&vpsrlq	($t3,$t3,19);',
	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[14..15])
	 '&vpshufd	($t3,$t2,0b10000100);',
	'&vpsrldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
	 '&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	 '&vpsrld	($t2,$t3,10);',
	 '&vpsrlq	($t1,$t3,17);',
	 '&vpxor	($t2,$t2,$t1);',
	 '&vpsrlq	($t3,$t3,19);',
	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[16..17])
	 '&vpshufd	($t3,$t2,0b11101000);',
	'&vpslldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
	);
}

local *ror = sub { &shrd(@_[0],@_) };
sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
my $insn;

	foreach (Xupdate_AVX()) {		# 31 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval($insn = shift(@insns));
	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
	}
	&vpaddd		($t2,@X[0],&QWP(16*$j,$K256));
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
}

    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);	# byte swap mask marks end of K256
	&jne	(&label("avx_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));	# byte swap mask
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end("sha256_block_data_order_avx");

						if ($avx>1) {
sub bodyx_00_15 () {			# +10%
	(
	'&rorx	("ecx",$E,6)',
	'&rorx	("esi",$E,11)',
	 '&mov	(&off($e),$E)',		# save $E, modulo-scheduled
	'&rorx	("edi",$E,25)',
	'&xor	("ecx","esi")',
	 '&andn	("esi",$E,&off($g))',
	'&xor	("ecx","edi")',		# Sigma1(e)
	 '&and	($E,&off($f))',
	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	 '&or	($E,"esi")',		# T = Ch(e,f,g)

	'&rorx	("edi",$AH[0],2)',
	'&rorx	("esi",$AH[0],13)',
	 '&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
	'&rorx	("ecx",$AH[0],22)',
	'&xor	("esi","edi")',
	 '&mov	("edi",&off($b))',
	'&xor	("ecx","esi")',		# Sigma0(a)

	 '&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
	 '&add	($E,&off($h))',		# T += h
	 '&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
	 '&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
	 '&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)

	 '&add	("ecx",$E)',		# h += T
	 '&add	($E,&off($d))',		# d += T
	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
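# BMI notes on the variant above: RORX rotates without touching flags, and
# ANDN computes (~e & g) in one instruction, so Ch(e,f,g) can be assembled
# as (e & f) | (~e & g); the two terms can never both be set, which is why
# OR is as good as XOR here.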

# If enabled, this function should be gated on AVX, BMI1, and BMI2.
&function_begin("sha256_block_data_order_avx_bmi");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp

	# TODO(davidben): The preamble above this point comes from the original
	# merged sha256_block_data_order function, which performed some common
	# setup and then jumped to the particular SHA-256 implementation. The
	# parts of the preamble that do not apply to this function can be
	# removed.

	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));	# byte swap mask
	&jmp	(&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp		(&label("avx_bmi_00_47"));

&set_label("avx_bmi_00_47",16);
	&add		($K256,64);

    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&bodyx_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);	# byte swap mask marks end of K256
	&jne	(&label("avx_bmi_00_47"));

    for ($i=0; $i<16; ) {
	foreach(bodyx_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));	# byte swap mask
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx_bmi"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end("sha256_block_data_order_avx_bmi");
						}
						}
						}}}

&asm_finish();

close STDOUT or die "error closing STDOUT: $!";