#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes amount of writes.
#
# May 2012.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium, ~40% on Atom. As fully unrolled loop body is almost
# 15x larger, 8KB vs. 560B, it's fired only for longer inputs. But not
# on P4, where it kills performance, nor Sandy Bridge, where folded
# loop is approximately as fast...
#
# June 2012.
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
#
# May 2013.
#
# Replace AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# March 2014.
#
# Add support for Intel SHA Extensions.
#
# Performance in clock cycles per processed byte (less is better):
#
#		gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)
# Pentium	46	57	40/38		-	-
# PIII		36	33	27/24		-	-
# P4		41	38	28		-	17.3
# AMD K8	27	25	19/15.5		-	14.9
# Core2		26	23	18/15.6		14.3	13.8
# Westmere	27	-	19/15.7		13.4	12.3
# Sandy Bridge	25	-	15.9		12.4	11.6
# Ivy Bridge	24	-	15.0		11.4	10.3
# Haswell	22	-	13.9		9.46	7.80
# Bulldozer	36	-	27/22		17.0	13.6
# VIA Nano	36	-	25/22		16.8	16.5
# Atom		50	-	30/25		21.9	18.9
#
# (*)	numbers after slash are for unrolled loop, where applicable;
# (**)	x86_64 assembly performance is presented for reference
#	purposes, results are best-available;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"sha256-586.pl",$ARGV[$#ARGV] eq "386");

$xmm=$avx=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

if ($xmm &&	`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
			=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.03) + ($1>=2.10);
}

if ($xmm && !$avx && $ARGV[0] eq "win32" &&
		`ml 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if ($xmm && !$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
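
# Note that $avx here reflects what the assembler/compiler being
# driven can encode (AVX at the first threshold, BMI/rorx at the
# second), not what the target CPU supports; CPU capabilities are
# tested at run time via OPENSSL_ia32cap_P below.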

$shaext=$xmm;	### set to zero if compiling for 1.0.1

$unroll_after = 64*4;	# If pre-evicted from L1P cache, the first spin
			# of the fully unrolled loop was measured to run
			# about 3-4x slower. If the slowdown coefficient
			# is N and the unrolled loop is m times faster,
			# then you break even at (N-1)/(m-1) blocks. Then
			# it needs to be adjusted for the probability of
			# the code being evicted, code size/cache
			# size=1/4. Typical m is 1.15...
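			# E.g. with N~3.5 and m~1.15, (N-1)/(m-1) is
			# ~17 blocks; weighted by the 1/4 eviction
			# probability that is ~4 blocks, i.e.
			# 4*64=256 bytes, which is the 64*4 above.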

$A="eax";
$E="edx";
$T="ebx";
$Aoff=&DWP(4,"esp");
$Boff=&DWP(8,"esp");
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");
$K256="ebp";

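# For reference, the scalar primitives the round bodies below compute,
# restated as plain Perl per FIPS 180-4 (documentation only, nothing
# in this file calls these subs):
sub _ror32  { my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n)))&0xffffffff; }
sub _sigma0 { my $x=shift; _ror32($x,7)^_ror32($x,18)^($x>>3); }
sub _sigma1 { my $x=shift; _ror32($x,17)^_ror32($x,19)^($x>>10); }
sub _Sigma0 { my $x=shift; _ror32($x,2)^_ror32($x,13)^_ror32($x,22); }
sub _Sigma1 { my $x=shift; _ror32($x,6)^_ror32($x,11)^_ror32($x,25); }
sub _Ch     { my ($e,$f,$g)=@_; ($e&$f)^(~$e&$g&0xffffffff); }
sub _Maj    { my ($a,$b,$c)=@_; ($a&$b)^($a&$c)^($b&$c); }
# The staggered rotate counts below (e.g. ror 18-7, ..., ror 7) chain
# the three rotations of a sigma on a single register.
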
sub BODY_16_63() {
	&mov	($T,"ecx");			# "ecx" is preloaded
	 &mov	("esi",&DWP(4*(9+15+16-14),"esp"));
	&ror	("ecx",18-7);
	 &mov	("edi","esi");
	&ror	("esi",19-17);
	 &xor	("ecx",$T);
	 &shr	($T,3);
	&ror	("ecx",7);
	 &xor	("esi","edi");
	 &xor	($T,"ecx");			# T = sigma0(X[-15])
	&ror	("esi",17);
	 &add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
	&shr	("edi",10);
	 &add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
	#&xor	("edi","esi")			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

	&BODY_00_15(1);
}
sub BODY_00_15() {
    my $in_16_63=shift;

	&mov	("ecx",$E);
	 &xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
	 &mov	("esi",$Foff);
	&ror	("ecx",25-11);
	 &add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
	 &mov	("edi",$Goff);
	&xor	("ecx",$E);
	 &xor	("esi","edi");
	 &mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
	 &mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
	&ror	("ecx",11-6);
	 &and	("esi",$E);
	 &mov	($Eoff,$E);		# modulo-scheduled
	&xor	($E,"ecx");
	 &add	($T,$Hoff);		# T += h
	 &xor	("esi","edi");		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	 &mov	("ecx",$A);
	 &add	($T,"esi");		# T += Ch(e,f,g)

	&ror	("ecx",22-13);
	 &add	($T,$E);		# T += Sigma1(e)
	 &mov	("edi",$Boff);
	&xor	("ecx",$A);
	 &mov	($Aoff,$A);		# modulo-scheduled
	 &lea	("esp",&DWP(-4,"esp"));
	&ror	("ecx",13-2);
	 &mov	("esi",&DWP(0,$K256));
	&xor	("ecx",$A);
	 &mov	($E,$Eoff);		# e in next iteration, d in this one
	 &xor	($A,"edi");		# a ^= b
	&ror	("ecx",2);		# Sigma0(a)

	 &add	($T,"esi");		# T += K[i]
	 &mov	(&DWP(0,"esp"),$A);	# (b^c) in next round
	&add	($E,$T);		# d += T
	 &and	($A,&DWP(4,"esp"));	# a &= (b^c)
	&add	($T,"ecx");		# T += Sigma0(a)
	 &xor	($A,"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	 &mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
	&add	($K256,4);
	 &add	($A,$T);		# h += T
}
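
# The "alternative Maj" credited above relies on the identity
# Maj(a,b,c) = Ch(a^b,c,b) = ((a^b)&(b^c))^b, which lets each round
# xor the fresh a^b into the b^c value carried over from the previous
# round (the "magic" slot at 0(%esp)) instead of recomputing three
# ANDs. A quick generation-time sanity check of the identity over all
# bit patterns (a self-contained sketch, it never fires):
for my $x (0..7) {
    my ($a,$b,$c) = map { ($x>>$_)&1 } (2,1,0);
    die "Maj identity broken"
	if ((($a&$b)^($a&$c)^($b&$c)) != ((($a^$b)&($b^$c))^$b));
}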

&external_label("OPENSSL_ia32cap_P")		if (!$i386);

&function_begin("sha256_block_data_order");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
						if (!$i386 && $xmm) {
	&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
	&mov	("ecx",&DWP(0,"edx"));
	&mov	("ebx",&DWP(4,"edx"));
	&test	("ecx",1<<20);		# check for P4
	&jnz	(&label("loop"));
	&mov	("edx",&DWP(8,"edx"))	if ($xmm);
	&test	("ecx",1<<24);		# check for FXSR
	&jz	($unroll_after?&label("no_xmm"):&label("loop"));
	&and	("ecx",1<<30);		# mask "Intel CPU" bit
	&and	("ebx",1<<28|1<<9);	# mask AVX and SSSE3 bits
	&test	("edx",1<<29)		if ($shaext);	# check for SHA
	&jnz	(&label("shaext"))	if ($shaext);
	&or	("ecx","ebx");
	&and	("ecx",1<<28|1<<30);
	&cmp	("ecx",1<<28|1<<30);
					if ($xmm) {
	&je	(&label("AVX"))		if ($avx);
	&test	("ebx",1<<9);		# check for SSSE3
	&jnz	(&label("SSSE3"));
					} else {
	&je	(&label("loop_shrd"));
					}
						if ($unroll_after) {
&set_label("no_xmm");
	&sub	("eax","edi");
	&cmp	("eax",$unroll_after);
	&jae	(&label("unrolled"));
						} }
	&jmp	(&label("loop"));

sub COMPACT_LOOP() {
my $suffix=shift;

&set_label("loop$suffix",$suffix?32:16);
    # copy input block to stack reversing byte and dword order
    for($i=0;$i<4;$i++) {
	&mov	("eax",&DWP($i*16+0,"edi"));
	&mov	("ebx",&DWP($i*16+4,"edi"));
	&mov	("ecx",&DWP($i*16+8,"edi"));
	&bswap	("eax");
	&mov	("edx",&DWP($i*16+12,"edi"));
	&bswap	("ebx");
	&push	("eax");
	&bswap	("ecx");
	&push	("ebx");
	&bswap	("edx");
	&push	("ecx");
	&push	("edx");
    }
	&add	("edi",64);
	&lea	("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($A,&DWP(0,"esi"));
	&mov	("ebx",&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	# &mov	($Aoff,$A);
	&mov	($Boff,"ebx");
	&xor	("ebx","ecx");
	&mov	($Coff,"ecx");
	&mov	($Doff,"edi");
	&mov	(&DWP(0,"esp"),"ebx");	# magic
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("edi",&DWP(28,"esi"));
	# &mov	($Eoff,$E);
	&mov	($Foff,"ebx");
	&mov	($Goff,"ecx");
	&mov	($Hoff,"edi");

&set_label("00_15$suffix",16);

	&BODY_00_15();

	&cmp	("esi",0xc19bf174);
	&jne	(&label("00_15$suffix"));

	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
	&jmp	(&label("16_63$suffix"));

&set_label("16_63$suffix",16);

	&BODY_16_63();

	&cmp	("esi",0xc67178f2);
	&jne	(&label("16_63$suffix"));

	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
	# &mov	($A,$Aoff);
	&mov	("ebx",$Boff);
	# &mov	("edi",$Coff);
	&mov	("ecx",$Doff);
	&add	($A,&DWP(0,"esi"));
	&add	("ebx",&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$A);
	&mov	(&DWP(4,"esi"),"ebx");
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	# &mov	($E,$Eoff);
	&mov	("eax",$Foff);
	&mov	("ebx",$Goff);
	&mov	("ecx",$Hoff);
	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
	&add	($E,&DWP(16,"esi"));
	&add	("eax",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"eax");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");

	&lea	("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
	&sub	($K256,4*64);			# rewind K

	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
	&jb	(&label("loop$suffix"));
}
	&COMPACT_LOOP();
	&mov	("esp",&DWP(12,"esp"));		# restore sp
&function_end_A();
						if (!$i386 && !$xmm) {
	# ~20% improvement on Sandy Bridge
	local *ror = sub { &shrd(@_[0],@_) };
	&COMPACT_LOOP("_shrd");
	&mov	("esp",&DWP(12,"esp"));		# restore sp
&function_end_A();
						}

&set_label("K256",64);	# Yes! I keep it in the code segment!
@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
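# (The values above are the SHA-256 round constants from FIPS 180-4:
# the first 32 bits of the fractional parts of the cube roots of the
# first 64 primes.)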
&data_word(@K256);
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);	# offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }

if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("ebx",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"ebx");
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"ebx");
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&jmp	(&label("grand_loop"));

&set_label("grand_loop",16);
    # copy input block to stack reversing byte order
    for($i=0;$i<5;$i++) {
	&mov	("ebx",&DWP(12*$i+0,"edi"));
	&mov	("ecx",&DWP(12*$i+4,"edi"));
	&bswap	("ebx");
	&mov	("esi",&DWP(12*$i+8,"edi"));
	&bswap	("ecx");
	&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
	&bswap	("esi");
	&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
	&mov	(&DWP(32+12*$i+8,"esp"),"esi");
    }
	&mov	("ebx",&DWP($i*12,"edi"));
	&add	("edi",64);
	&bswap	("ebx");
	&mov	(&DWP(96+4,"esp"),"edi");
	&mov	(&DWP(32+12*$i,"esp"),"ebx");

    my ($t1,$t2) = ("ecx","esi");

    for ($i=0;$i<64;$i++) {

      if ($i>=16) {
	&mov	($T,$t1);			# $t1 is preloaded
	# &mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));
	&ror	($t1,18-7);
	 &mov	("edi",$t2);
	&ror	($t2,19-17);
	 &xor	($t1,$T);
	 &shr	($T,3);
	&ror	($t1,7);
	 &xor	($t2,"edi");
	 &xor	($T,$t1);			# T = sigma0(X[-15])
	&ror	($t2,17);
	 &add	($T,&DWP(32+4*($i&15),"esp"));	# T += X[-16]
	&shr	("edi",10);
	 &add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
	#&xor	("edi",$t2)			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
      }
	&mov	($t1,$E);
	 &xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
	 &mov	($t2,&off($f));
	&ror	($E,25-11);
	 &add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
	 &mov	("edi",&off($g));
	&xor	($E,$t1);
	 &mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
	 &mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]
	 &xor	($t2,"edi");
	&ror	($E,11-6);
	 &and	($t2,$t1);
	 &mov	(&off($e),$t1);		# save $E, modulo-scheduled
	&xor	($E,$t1);
	 &add	($T,&off($h));		# T += h
	 &xor	("edi",$t2);		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	 &mov	($t1,$AH[0]);
	 &add	($T,"edi");		# T += Ch(e,f,g)

	&ror	($t1,22-13);
	 &mov	($t2,$AH[0]);
	 &mov	("edi",&off($b));
	&xor	($t1,$AH[0]);
	 &mov	(&off($a),$AH[0]);	# save $A, modulo-scheduled
	 &xor	($AH[0],"edi");		# a ^= b, (b^c) in next round
	&ror	($t1,13-2);
	 &and	($AH[1],$AH[0]);	# (b^c) &= (a^b)
	 &lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
	&xor	($t1,$t2);
	 &xor	($AH[1],"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	 &mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
	&ror	($t1,2);		# Sigma0(a)

	 &add	($AH[1],$E);		# h += T
	 &add	($E,&off($d));		# d += T
	&add	($AH[1],$t1);		# h += Sigma0(a)
	 &mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);

	@AH = reverse(@AH);		# rotate(a,h)
	($t1,$t2) = ($t2,$t1);		# rotate(t1,t2)
    }
	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");		# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ebx",&DWP(24,"esp"));
	&mov	("ecx",&DWP(28,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	 &mov	(&DWP(24,"esp"),"ebx");
	 &mov	(&DWP(28,"esp"),"ecx");

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_loop"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
}
						if (!$i386 && $xmm) {{{
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
my ($ctx,$inp,$end)=("esi","edi","eax");
my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
my @MSG=map("xmm$_",(3..6));

sub sha256op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha256rnds2	{ sha256op38(0xcb,@_); }
sub sha256msg1	{ sha256op38(0xcc,@_); }
sub sha256msg2	{ sha256op38(0xcd,@_); }
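
# The helpers above hand-assemble the SHA Extensions instructions as
# raw bytes for assemblers that predate them. As a worked example of
# the encoding: &sha256rnds2($CDGH,$ABEF), i.e. xmm2,xmm1, emits
# 0x0f,0x38,0xcb followed by ModRM 0xc0|(2<<3)|1 = 0xd1. sha256rnds2
# also takes xmm0 as an implicit third operand, which is why $Wi is
# pinned to xmm0 in the register map above.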

&set_label("shaext",32);
	&sub		("esp",32);

	&movdqu		($ABEF,&QWP(0,$ctx));		# DCBA
	&lea		($K256,&DWP(0x80,$K256));
	&movdqu		($CDGH,&QWP(16,$ctx));		# HGFE
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask

	&pshufd		($Wi,$ABEF,0x1b);		# ABCD
	&pshufd		($ABEF,$ABEF,0xb1);		# CDAB
	&pshufd		($CDGH,$CDGH,0x1b);		# EFGH
	&palignr	($ABEF,$CDGH,8);		# ABEF
	&punpcklqdq	($CDGH,$Wi);			# CDGH
	&jmp		(&label("loop_shaext"));
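
# The shuffle sequence above repacks the context's h[0..7], read as
# two little-endian {A,B,C,D}/{E,F,G,H} vectors, into the
# {A,B,E,F}/{C,D,G,H} lane order that sha256rnds2 operates on; the
# epilogue after the loop applies the inverse shuffle before storing.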

&set_label("loop_shaext",16);
	&movdqu		(@MSG[0],&QWP(0,$inp));
	&movdqu		(@MSG[1],&QWP(0x10,$inp));
	&movdqu		(@MSG[2],&QWP(0x20,$inp));
	&pshufb		(@MSG[0],$TMP);
	&movdqu		(@MSG[3],&QWP(0x30,$inp));
	&movdqa		(&QWP(16,"esp"),$CDGH);		# offload

	&movdqa		($Wi,&QWP(0*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&pshufb		(@MSG[1],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 0-3
	&pshufd		($Wi,$Wi,0x0e);
	&nop		();
	&movdqa		(&QWP(0,"esp"),$ABEF);		# offload
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(1*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&pshufb		(@MSG[2],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 4-7
	&pshufd		($Wi,$Wi,0x0e);
	&lea		($inp,&DWP(0x40,$inp));
	&sha256msg1	(@MSG[0],@MSG[1]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(2*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);
	&pshufb		(@MSG[3],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 8-11
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[3]);
	&palignr	($TMP,@MSG[2],4);
	&nop		();
	&paddd		(@MSG[0],$TMP);
	&sha256msg1	(@MSG[1],@MSG[2]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(3*16-0x80,$K256));
	&paddd		($Wi,@MSG[3]);
	&sha256msg2	(@MSG[0],@MSG[3]);
	&sha256rnds2	($CDGH,$ABEF);			# 12-15
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[0]);
	&palignr	($TMP,@MSG[3],4);
	&nop		();
	&paddd		(@MSG[1],$TMP);
	&sha256msg1	(@MSG[2],@MSG[3]);
	&sha256rnds2	($ABEF,$CDGH);

for($i=4;$i<16-3;$i++) {
	&movdqa		($Wi,&QWP($i*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);			# 16-19...
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&nop		();
	&paddd		(@MSG[2],$TMP);
	&sha256msg1	(@MSG[3],@MSG[0]);
	&sha256rnds2	($ABEF,$CDGH);

	push(@MSG,shift(@MSG));
}
	&movdqa		($Wi,&QWP(13*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);			# 52-55
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&sha256rnds2	($ABEF,$CDGH);
	&paddd		(@MSG[2],$TMP);

	&movdqa		($Wi,&QWP(14*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&sha256rnds2	($CDGH,$ABEF);			# 56-59
	&pshufd		($Wi,$Wi,0x0e);
	&sha256msg2	(@MSG[2],@MSG[1]);
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(15*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);
	&nop		();
	&sha256rnds2	($CDGH,$ABEF);			# 60-63
	&pshufd		($Wi,$Wi,0x0e);
	&cmp		($end,$inp);
	&nop		();
	&sha256rnds2	($ABEF,$CDGH);

	&paddd		($CDGH,&QWP(16,"esp"));
	&paddd		($ABEF,&QWP(0,"esp"));
	&jnz		(&label("loop_shaext"));

	&pshufd		($CDGH,$CDGH,0xb1);		# DCHG
	&pshufd		($TMP,$ABEF,0x1b);		# FEBA
	&pshufd		($ABEF,$ABEF,0xb1);		# BAFE
	&punpckhqdq	($ABEF,$CDGH);			# DCBA
	&palignr	($CDGH,$TMP,8);			# HGFE

	&mov		("esp",&DWP(32+12,"esp"));
	&movdqu		(&QWP(0,$ctx),$ABEF);
	&movdqu		(&QWP(16,$ctx),$CDGH);
&function_end_A();
}

my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
my @AH = ($A,$T);

&set_label("SSSE3",32);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&movdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_ssse3"));

&set_label("grand_ssse3",16);
	# load input, reverse byte order, add K256[0..15], save to stack
	&movdqu	(@X[0],&QWP(0,"edi"));
	&movdqu	(@X[1],&QWP(16,"edi"));
	&movdqu	(@X[2],&QWP(32,"edi"));
	&movdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&pshufb	(@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&pshufb	(@X[1],$t3);
	&movdqa	($t0,&QWP(0,$K256));
	&pshufb	(@X[2],$t3);
	&movdqa	($t1,&QWP(16,$K256));
	&paddd	($t0,@X[0]);
	&pshufb	(@X[3],$t3);
	&movdqa	($t2,&QWP(32,$K256));
	&paddd	($t1,@X[1]);
	&movdqa	($t3,&QWP(48,$K256));
	&movdqa	(&QWP(32+0,"esp"),$t0);
	&paddd	($t2,@X[2]);
	&movdqa	(&QWP(32+16,"esp"),$t1);
	&paddd	($t3,@X[3]);
	&movdqa	(&QWP(32+32,"esp"),$t2);
	&movdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("ssse3_00_47"));

&set_label("ssse3_00_47",16);
	&add		($K256,64);

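# SSE/AVX have no vector rotate instruction, so inside SSSE3_00_47
# below every sigma rotation is synthesized from a pair of shifts
# (psrld/pslld on 32-bit lanes, psrlq where two message words are
# handled in 64-bit lanes) xor-ed together.
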
sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions

	  eval(shift(@insns));
	&movdqa		($t0,@X[1]);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa		($t3,@X[3]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&palignr	($t0,@X[0],4);		# X[1..4]
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &palignr	($t3,@X[2],4);		# X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&movdqa		($t1,$t0);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa		($t2,$t0);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t0,3);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t2,7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld		($t1,32-18);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t2,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t1);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld		($t1,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &movdqa	($t2,$t3);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t1);		# sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrld		($t3,10);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pshufd	($t3,$t3,0b10000000);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &psrldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &pshufd	($t3,@X[0],0b01010000);	# X[16..17]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &movdqa	($t2,$t3);
	  eval(shift(@insns));			# @
	 &psrld		($t3,10);
	  eval(shift(@insns));
	 &psrlq		($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pshufd	($t3,$t3,0b00001000);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&movdqa		($t2,&QWP(16*$j,$K256));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pslldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd		($t2,@X[0]);
	  eval(shift(@insns));			# @

	foreach (@insns) { eval; }		# remaining instructions

	&movdqa		(&QWP(32+16*$j,"esp"),$t2);
}

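# body_00_15() returns the scalar round as a list of instruction
# strings instead of emitting it directly, so SSSE3_00_47 above (and
# AVX_00_47 below) can interleave roughly three scalar instructions
# between consecutive vector instructions of the message-schedule
# update.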
sub body_00_15 () {
	(
	'&mov	("ecx",$E);',
	'&ror	($E,25-11);',
	 '&mov	("esi",&off($f));',
	'&xor	($E,"ecx");',
	 '&mov	("edi",&off($g));',
	 '&xor	("esi","edi");',
	'&ror	($E,11-6);',
	 '&and	("esi","ecx");',
	 '&mov	(&off($e),"ecx");',	# save $E, modulo-scheduled
	'&xor	($E,"ecx");',
	 '&xor	("edi","esi");',	# Ch(e,f,g)
	'&ror	($E,6);',		# T = Sigma1(e)
	 '&mov	("ecx",$AH[0]);',
	 '&add	($E,"edi");',		# T += Ch(e,f,g)
	 '&mov	("edi",&off($b));',
	'&mov	("esi",$AH[0]);',

	'&ror	("ecx",22-13);',
	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&xor	("ecx",$AH[0]);',
	 '&xor	($AH[0],"edi");',	# a ^= b, (b^c) in next round
	 '&add	($E,&off($h));',	# T += h
	'&ror	("ecx",13-2);',
	 '&and	($AH[1],$AH[0]);',	# (b^c) &= (a^b)
	'&xor	("ecx","esi");',
	 '&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
	 '&xor	($AH[1],"edi");',	# h = Maj(a,b,c) = Ch(a^b,c,b)
	'&ror	("ecx",2);',		# Sigma0(a)

	 '&add	($AH[1],$E);',		# h += T
	 '&add	($E,&off($d));',	# d += T
	'&add	($AH[1],"ecx");'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}

    for ($i=0,$j=0; $j<4; $j++) {
	&SSSE3_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("ssse3_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&movdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_ssse3"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
						if ($avx) {
&set_label("AVX",32);
						if ($avx>1) {
	&and	("edx",1<<8|1<<3);		# check for BMI2+BMI1
	&cmp	("edx",1<<8|1<<3);
	&je	(&label("AVX_BMI"));
						}
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx"));

&set_label("grand_avx",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp		(&label("avx_00_47"));

&set_label("avx_00_47",16);
	&add		($K256,64);

sub Xupdate_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
	 '&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
	'&vpsrld	($t2,$t0,7);',
	 '&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,3);',
	'&vpslld	($t1,$t0,14);',
	'&vpxor		($t0,$t3,$t2);',
	 '&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,18-7);',
	'&vpxor		($t0,$t0,$t1);',
	'&vpslld	($t1,$t1,25-14);',
	'&vpxor		($t0,$t0,$t2);',
	 '&vpsrld	($t2,$t3,10);',
	'&vpxor		($t0,$t0,$t1);',	# sigma0(X[1..4])
	 '&vpsrlq	($t1,$t3,17);',
	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
	 '&vpxor	($t2,$t2,$t1);',
	 '&vpsrlq	($t3,$t3,19);',
	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[14..15])
	 '&vpshufd	($t3,$t2,0b10000100);',
	'&vpsrldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
	 '&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	 '&vpsrld	($t2,$t3,10);',
	 '&vpsrlq	($t1,$t3,17);',
	 '&vpxor	($t2,$t2,$t1);',
	 '&vpsrlq	($t3,$t3,19);',
	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[16..17])
	 '&vpshufd	($t3,$t2,0b11101000);',
	'&vpslldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
	);
}

local *ror = sub { &shrd(@_[0],@_) };
sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
my $insn;

	foreach (Xupdate_AVX()) {		# 31 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval($insn = shift(@insns));
	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
	}
	&vpaddd		($t2,@X[0],&QWP(16*$j,$K256));
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
}

    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();
						if ($avx>1) {
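# bodyx_00_15() is the same round recast for BMI-capable CPUs: rorx
# (BMI2) rotates without destroying its source or touching flags, and
# andn (BMI1) yields ~e&g in one instruction, so Ch(e,f,g) is formed
# as (e&f)|(~e&g); that is the "+10%" noted below.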
sub bodyx_00_15 () {			# +10%
	(
	'&rorx	("ecx",$E,6)',
	'&rorx	("esi",$E,11)',
	 '&mov	(&off($e),$E)',		# save $E, modulo-scheduled
	'&rorx	("edi",$E,25)',
	'&xor	("ecx","esi")',
	 '&andn	("esi",$E,&off($g))',
	'&xor	("ecx","edi")',		# Sigma1(e)
	 '&and	($E,&off($f))',
	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	 '&or	($E,"esi")',		# T = Ch(e,f,g)

	'&rorx	("edi",$AH[0],2)',
	'&rorx	("esi",$AH[0],13)',
	 '&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
	'&rorx	("ecx",$AH[0],22)',
	'&xor	("esi","edi")',
	 '&mov	("edi",&off($b))',
	'&xor	("ecx","esi")',		# Sigma0(a)

	 '&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
	 '&add	($E,&off($h))',		# T += h
	 '&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
	 '&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
	 '&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)

	 '&add	("ecx",$E)',		# h += T
	 '&add	($E,&off($d))',		# d += T
	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}

&set_label("AVX_BMI",32);
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp		(&label("avx_bmi_00_47"));

&set_label("avx_bmi_00_47",16);
	&add		($K256,64);

    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&bodyx_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_bmi_00_47"));

    for ($i=0; $i<16; ) {
	foreach(bodyx_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx_bmi"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();
						}
						}
						}}}
&function_end_B("sha256_block_data_order");

&asm_finish();