#! /usr/bin/env perl
# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler-generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and reduces the number of writes.
#
# May 2012.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium, ~40% on Atom. As the fully unrolled loop body is
# almost 15x larger (8KB vs. 560B), it's engaged only for longer
# inputs. But not on P4, where it kills performance, nor on Sandy
# Bridge, where the folded loop is approximately as fast...
#
# June 2012.
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
#
# May 2013.
#
# Replace AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# March 2014.
#
# Add support for Intel SHA Extensions.
#
# Performance in clock cycles per processed byte (less is better):
#
#		gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)
# Pentium	46	57	40/38		-	-
# PIII		36	33	27/24		-	-
# P4		41	38	28		-	17.3
# AMD K8	27	25	19/15.5		-	14.9
# Core2		26	23	18/15.6		14.3	13.8
# Westmere	27	-	19/15.7		13.4	12.3
# Sandy Bridge	25	-	15.9		12.4	11.6
# Ivy Bridge	24	-	15.0		11.4	10.3
# Haswell	22	-	13.9		9.46	7.80
# Skylake	20	-	14.9		9.50	7.70
# Bulldozer	36	-	27/22		17.0	13.6
# VIA Nano	36	-	25/22		16.8	16.5
# Atom		50	-	30/25		21.9	18.9
# Silvermont	40	-	34/31		22.9	20.6
# Goldmont	29	-	20		16.3(***)
#
# (*)	numbers after slash are for unrolled loop, where applicable;
# (**)	x86_64 assembly performance is presented for reference
#	purposes, results are best-available;
# (***)	SHAEXT result is 4.1, strangely enough better than 64-bit one;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../../perlasm");
require "x86asm.pl";

$output=pop;
open STDOUT,">$output";

&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

$xmm=$avx=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
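# A typical invocation, for illustration (the exact file name and flag
# spelling are assumptions; the only contract is that the first argument
# picks the assembler flavour, -DOPENSSL_IA32_SSE2 enables the SIMD
# paths, and the last argument names the output file):
#
#   perl sha256-586.pl elf -DOPENSSL_IA32_SSE2 sha256-586.S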

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
#
# TODO(davidben): Enable AVX2 code after testing by setting $avx to 2.
$avx = 1;

$avx = 0 unless ($xmm);

$shaext=$xmm;	### set to zero if compiling for 1.0.1

# TODO(davidben): Consider enabling the Intel SHA Extensions code once it's
# been tested.
$shaext = 0;

$unroll_after = 64*4;	# If pre-evicted from the L1P cache, the first
			# spin of the fully unrolled loop was measured
			# to run about 3-4x slower. If the slowdown
			# coefficient is N and the unrolled loop is m
			# times faster, you break even at (N-1)/(m-1)
			# blocks. That then has to be scaled by the
			# probability of the code being evicted,
			# code size/cache size = 1/4. Typical m is 1.15...
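# A worked instance of that break-even estimate (illustrative numbers
# read off the comment above, N ~ 3.5 and m ~ 1.15):
#
#   (N-1)/(m-1) = 2.5/0.15 ~ 17 blocks to amortize a cold unrolled loop;
#   scaled by the ~1/4 eviction probability that leaves ~4 blocks, i.e.
#   4*64 = 256 bytes -- which is where the 64*4 threshold comes from.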

$A="eax";
$E="edx";
$T="ebx";
$Aoff=&DWP(4,"esp");
$Boff=&DWP(8,"esp");
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");
$K256="ebp";

sub BODY_16_63() {
	&mov	($T,"ecx");			# "ecx" is preloaded
	 &mov	("esi",&DWP(4*(9+15+16-14),"esp"));
	&ror	("ecx",18-7);
	 &mov	("edi","esi");
	&ror	("esi",19-17);
	 &xor	("ecx",$T);
	 &shr	($T,3);
	&ror	("ecx",7);
	 &xor	("esi","edi");
	 &xor	($T,"ecx");			# T = sigma0(X[-15])
	&ror	("esi",17);
	 &add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
	&shr	("edi",10);
	 &add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
	#&xor	("edi","esi")			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

	&BODY_00_15(1);
}
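# For reference, a plain-Perl model of the FIPS 180-4 message-schedule
# functions that BODY_16_63 computes above with staged rotations (a
# documentation aid only -- nothing in the generator calls these):
sub ror32  { my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff; }
sub sigma0 { my $x=shift; ror32($x,7)  ^ ror32($x,18) ^ ($x>>3);   }
sub sigma1 { my $x=shift; ror32($x,17) ^ ror32($x,19) ^ ($x>>10);  }
# so that X[0] = sigma1(X[-2]) + X[-7] + sigma0(X[-15]) + X[-16] (mod 2^32).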
sub BODY_00_15() {
    my $in_16_63=shift;

	&mov	("ecx",$E);
	 &xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
	 &mov	("esi",$Foff);
	&ror	("ecx",25-11);
	 &add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
	 &mov	("edi",$Goff);
	&xor	("ecx",$E);
	 &xor	("esi","edi");
	 &mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
	 &mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
	&ror	("ecx",11-6);
	 &and	("esi",$E);
	 &mov	($Eoff,$E);		# modulo-scheduled
	&xor	($E,"ecx");
	 &add	($T,$Hoff);		# T += h
	 &xor	("esi","edi");		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	 &mov	("ecx",$A);
	 &add	($T,"esi");		# T += Ch(e,f,g)

	&ror	("ecx",22-13);
	 &add	($T,$E);		# T += Sigma1(e)
	 &mov	("edi",$Boff);
	&xor	("ecx",$A);
	 &mov	($Aoff,$A);		# modulo-scheduled
	 &lea	("esp",&DWP(-4,"esp"));
	&ror	("ecx",13-2);
	 &mov	("esi",&DWP(0,$K256));
	&xor	("ecx",$A);
	 &mov	($E,$Eoff);		# e in next iteration, d in this one
	 &xor	($A,"edi");		# a ^= b
	&ror	("ecx",2);		# Sigma0(a)

	 &add	($T,"esi");		# T += K[i]
	 &mov	(&DWP(0,"esp"),$A);	# (b^c) in next round
	&add	($E,$T);		# d += T
	 &and	($A,&DWP(4,"esp"));	# a &= (b^c)
	&add	($T,"ecx");		# T += Sigma0(a)
	 &xor	($A,"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	 &mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
	&add	($K256,4);
	 &add	($A,$T);		# h += T
}
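# The Maj() above is Pavel Semjanov's alternative formulation,
# Maj(a,b,c) = Ch(a^b,c,b) = ((a^b)&(b^c))^b; it pays off because this
# round's (a^b), left in $A, is exactly the (b^c) the next round needs.
# A bit-wise sanity check of the identity (runs silently at generation
# time, purely illustrative):
for my $bits (0..7) {
	my ($x,$y,$z) = map { ($bits>>$_)&1 } (2,1,0);
	my $maj = ($x&$y)|($x&$z)|($y&$z);
	die "Maj identity broken" if (((($x^$y)&($y^$z))^$y) != $maj);
}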

&external_label("OPENSSL_ia32cap_P")		if (!$i386);

&function_begin("sha256_block_data_order");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
						if (!$i386 && $xmm) {
	&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
	&mov	("ecx",&DWP(0,"edx"));
	&mov	("ebx",&DWP(4,"edx"));
	&test	("ecx",1<<20);		# check for P4
	&jnz	(&label("loop"));
	&mov	("edx",&DWP(8,"edx"))	if ($xmm);
	&test	("ecx",1<<24);		# check for FXSR
	&jz	($unroll_after?&label("no_xmm"):&label("loop"));
	&and	("ecx",1<<30);		# mask "Intel CPU" bit
	&and	("ebx",1<<28|1<<9);	# mask AVX and SSSE3 bits
	&test	("edx",1<<29)		if ($shaext);	# check for SHA
	&jnz	(&label("shaext"))	if ($shaext);
	&or	("ecx","ebx");
	&and	("ecx",1<<28|1<<30);
	&cmp	("ecx",1<<28|1<<30);
					if ($xmm) {
	&je	(&label("AVX"))		if ($avx);
	&test	("ebx",1<<9);		# check for SSSE3
	&jnz	(&label("SSSE3"));
					} else {
	&je	(&label("loop_shrd"));
					}
						if ($unroll_after) {
&set_label("no_xmm");
	&sub	("eax","edi");
	&cmp	("eax",$unroll_after);
	&jae	(&label("unrolled"));
						} }
	&jmp	(&label("loop"));

sub COMPACT_LOOP() {
my $suffix=shift;

&set_label("loop$suffix",$suffix?32:16);
    # copy input block to stack reversing byte and dword order
    for($i=0;$i<4;$i++) {
	&mov	("eax",&DWP($i*16+0,"edi"));
	&mov	("ebx",&DWP($i*16+4,"edi"));
	&mov	("ecx",&DWP($i*16+8,"edi"));
	&bswap	("eax");
	&mov	("edx",&DWP($i*16+12,"edi"));
	&bswap	("ebx");
	&push	("eax");
	&bswap	("ecx");
	&push	("ebx");
	&bswap	("edx");
	&push	("ecx");
	&push	("edx");
    }
	&add	("edi",64);
	&lea	("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($A,&DWP(0,"esi"));
	&mov	("ebx",&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	# &mov	($Aoff,$A);
	&mov	($Boff,"ebx");
	&xor	("ebx","ecx");
	&mov	($Coff,"ecx");
	&mov	($Doff,"edi");
	&mov	(&DWP(0,"esp"),"ebx");	# magic
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("edi",&DWP(28,"esi"));
	# &mov	($Eoff,$E);
	&mov	($Foff,"ebx");
	&mov	($Goff,"ecx");
	&mov	($Hoff,"edi");

&set_label("00_15$suffix",16);

	&BODY_00_15();

	&cmp	("esi",0xc19bf174);
	&jne	(&label("00_15$suffix"));

	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
	&jmp	(&label("16_63$suffix"));

&set_label("16_63$suffix",16);

	&BODY_16_63();

	&cmp	("esi",0xc67178f2);
	&jne	(&label("16_63$suffix"));

	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
	# &mov	($A,$Aoff);
	&mov	("ebx",$Boff);
	# &mov	("edi",$Coff);
	&mov	("ecx",$Doff);
	&add	($A,&DWP(0,"esi"));
	&add	("ebx",&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$A);
	&mov	(&DWP(4,"esi"),"ebx");
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	# &mov	($E,$Eoff);
	&mov	("eax",$Foff);
	&mov	("ebx",$Goff);
	&mov	("ecx",$Hoff);
	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
	&add	($E,&DWP(16,"esi"));
	&add	("eax",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"eax");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");

	&lea	("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
	&sub	($K256,4*64);			# rewind K

	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
	&jb	(&label("loop$suffix"));
}
	&COMPACT_LOOP();
	&mov	("esp",&DWP(12,"esp"));		# restore sp
&function_end_A();
						if (!$i386 && !$xmm) {
	# ~20% improvement on Sandy Bridge
	local *ror = sub { &shrd(@_[0],@_) };
	&COMPACT_LOOP("_shrd");
	&mov	("esp",&DWP(12,"esp"));		# restore sp
&function_end_A();
						}

&set_label("K256",64);	# Yes! I keep it in the code segment!
@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
&data_word(@K256);
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);	# offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
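# How off() implements the a..h rotation: the logical names are the fixed
# offsets 0..7 above, but $i advances every round, so the same stack slot
# is reached through the "next" name each round. E.g. at $i==0, &off($f)
# is &DWP(4*5,"esp"); at $i==1 that very slot is addressed as &off($g),
# matching SHA-256's f->g shift without moving any data (worked example
# added for illustration).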

if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("ebx",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"ebx");
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"ebx");
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&jmp	(&label("grand_loop"));

&set_label("grand_loop",16);
    # copy input block to stack reversing byte order
    for($i=0;$i<5;$i++) {
	&mov	("ebx",&DWP(12*$i+0,"edi"));
	&mov	("ecx",&DWP(12*$i+4,"edi"));
	&bswap	("ebx");
	&mov	("esi",&DWP(12*$i+8,"edi"));
	&bswap	("ecx");
	&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
	&bswap	("esi");
	&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
	&mov	(&DWP(32+12*$i+8,"esp"),"esi");
    }
	&mov	("ebx",&DWP($i*12,"edi"));
	&add	("edi",64);
	&bswap	("ebx");
	&mov	(&DWP(96+4,"esp"),"edi");
	&mov	(&DWP(32+12*$i,"esp"),"ebx");

    my ($t1,$t2) = ("ecx","esi");

    for ($i=0;$i<64;$i++) {

      if ($i>=16) {
	&mov	($T,$t1);			# $t1 is preloaded
	# &mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));
	&ror	($t1,18-7);
	 &mov	("edi",$t2);
	&ror	($t2,19-17);
	 &xor	($t1,$T);
	 &shr	($T,3);
	&ror	($t1,7);
	 &xor	($t2,"edi");
	 &xor	($T,$t1);			# T = sigma0(X[-15])
	&ror	($t2,17);
	 &add	($T,&DWP(32+4*($i&15),"esp"));	# T += X[-16]
	&shr	("edi",10);
	 &add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
	#&xor	("edi",$t2)			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
      }
	&mov	($t1,$E);
	 &xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
	 &mov	($t2,&off($f));
	&ror	($E,25-11);
	 &add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
	 &mov	("edi",&off($g));
	&xor	($E,$t1);
	 &mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
	 &mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]
	 &xor	($t2,"edi");
	&ror	($E,11-6);
	 &and	($t2,$t1);
	 &mov	(&off($e),$t1);		# save $E, modulo-scheduled
	&xor	($E,$t1);
	 &add	($T,&off($h));		# T += h
	 &xor	("edi",$t2);		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	 &mov	($t1,$AH[0]);
	 &add	($T,"edi");		# T += Ch(e,f,g)

	&ror	($t1,22-13);
	 &mov	($t2,$AH[0]);
	 &mov	("edi",&off($b));
	&xor	($t1,$AH[0]);
	 &mov	(&off($a),$AH[0]);	# save $A, modulo-scheduled
	 &xor	($AH[0],"edi");		# a ^= b, (b^c) in next round
	&ror	($t1,13-2);
	 &and	($AH[1],$AH[0]);	# (b^c) &= (a^b)
	 &lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
	&xor	($t1,$t2);
	 &xor	($AH[1],"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	 &mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
	&ror	($t1,2);		# Sigma0(a)

	 &add	($AH[1],$E);		# h += T
	 &add	($E,&off($d));		# d += T
	&add	($AH[1],$t1);		# h += Sigma0(a)
	 &mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);

	@AH = reverse(@AH);		# rotate(a,h)
	($t1,$t2) = ($t2,$t1);		# rotate(t1,t2)
    }
	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");		# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ebx",&DWP(24,"esp"));
	&mov	("ecx",&DWP(28,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	 &mov	(&DWP(24,"esp"),"ebx");
	 &mov	(&DWP(28,"esp"),"ecx");

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_loop"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
}
						if (!$i386 && $xmm) {{{
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
my ($ctx,$inp,$end)=("esi","edi","eax");
my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
my @MSG=map("xmm$_",(3..6));

sub sha256op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha256rnds2	{ sha256op38(0xcb,@_); }
sub sha256msg1	{ sha256op38(0xcc,@_); }
sub sha256msg2	{ sha256op38(0xcd,@_); }
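# The helpers above hand-assemble the SHA-NI instructions for toolchains
# that predate them: "0f 38 cb/cc/cd" followed by a register-register
# ModR/M byte with the destination in the reg field. For example,
# sha256rnds2($CDGH,$ABEF), i.e. xmm2/xmm1 here, should emit the bytes
# 0f 38 cb d1, since 0xc0|(2<<3)|1 == 0xd1 (encoding walk-through added
# for illustration).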

&set_label("shaext",32);
	&sub		("esp",32);

	&movdqu		($ABEF,&QWP(0,$ctx));		# DCBA
	&lea		($K256,&DWP(0x80,$K256));
	&movdqu		($CDGH,&QWP(16,$ctx));		# HGFE
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask

	&pshufd		($Wi,$ABEF,0x1b);		# ABCD
	&pshufd		($ABEF,$ABEF,0xb1);		# CDAB
	&pshufd		($CDGH,$CDGH,0x1b);		# EFGH
	&palignr	($ABEF,$CDGH,8);		# ABEF
	&punpcklqdq	($CDGH,$Wi);			# CDGH
	&jmp		(&label("loop_shaext"));

&set_label("loop_shaext",16);
	&movdqu		(@MSG[0],&QWP(0,$inp));
	&movdqu		(@MSG[1],&QWP(0x10,$inp));
	&movdqu		(@MSG[2],&QWP(0x20,$inp));
	&pshufb		(@MSG[0],$TMP);
	&movdqu		(@MSG[3],&QWP(0x30,$inp));
	&movdqa		(&QWP(16,"esp"),$CDGH);		# offload

	&movdqa		($Wi,&QWP(0*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&pshufb		(@MSG[1],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 0-3
	&pshufd		($Wi,$Wi,0x0e);
	&nop		();
	&movdqa		(&QWP(0,"esp"),$ABEF);		# offload
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(1*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&pshufb		(@MSG[2],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 4-7
	&pshufd		($Wi,$Wi,0x0e);
	&lea		($inp,&DWP(0x40,$inp));
	&sha256msg1	(@MSG[0],@MSG[1]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(2*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);
	&pshufb		(@MSG[3],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 8-11
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[3]);
	&palignr	($TMP,@MSG[2],4);
	&nop		();
	&paddd		(@MSG[0],$TMP);
	&sha256msg1	(@MSG[1],@MSG[2]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(3*16-0x80,$K256));
	&paddd		($Wi,@MSG[3]);
	&sha256msg2	(@MSG[0],@MSG[3]);
	&sha256rnds2	($CDGH,$ABEF);			# 12-15
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[0]);
	&palignr	($TMP,@MSG[3],4);
	&nop		();
	&paddd		(@MSG[1],$TMP);
	&sha256msg1	(@MSG[2],@MSG[3]);
	&sha256rnds2	($ABEF,$CDGH);

for($i=4;$i<16-3;$i++) {
	&movdqa		($Wi,&QWP($i*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);			# 16-19...
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&nop		();
	&paddd		(@MSG[2],$TMP);
	&sha256msg1	(@MSG[3],@MSG[0]);
	&sha256rnds2	($ABEF,$CDGH);

	push(@MSG,shift(@MSG));
}
	&movdqa		($Wi,&QWP(13*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);			# 52-55
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&sha256rnds2	($ABEF,$CDGH);
	&paddd		(@MSG[2],$TMP);

	&movdqa		($Wi,&QWP(14*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&sha256rnds2	($CDGH,$ABEF);			# 56-59
	&pshufd		($Wi,$Wi,0x0e);
	&sha256msg2	(@MSG[2],@MSG[1]);
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(15*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);
	&nop		();
	&sha256rnds2	($CDGH,$ABEF);			# 60-63
	&pshufd		($Wi,$Wi,0x0e);
	&cmp		($end,$inp);
	&nop		();
	&sha256rnds2	($ABEF,$CDGH);

	&paddd		($CDGH,&QWP(16,"esp"));
	&paddd		($ABEF,&QWP(0,"esp"));
	&jnz		(&label("loop_shaext"));

	&pshufd		($CDGH,$CDGH,0xb1);		# DCHG
	&pshufd		($TMP,$ABEF,0x1b);		# FEBA
	&pshufd		($ABEF,$ABEF,0xb1);		# BAFE
	&punpckhqdq	($ABEF,$CDGH);			# DCBA
	&palignr	($CDGH,$TMP,8);			# HGFE

	&mov		("esp",&DWP(32+12,"esp"));
	&movdqu		(&QWP(0,$ctx),$ABEF);
	&movdqu		(&QWP(16,$ctx),$CDGH);
&function_end_A();
}

my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
my @AH = ($A,$T);

&set_label("SSSE3",32);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&movdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_ssse3"));

&set_label("grand_ssse3",16);
	# load input, reverse byte order, add K256[0..15], save to stack
	&movdqu	(@X[0],&QWP(0,"edi"));
	&movdqu	(@X[1],&QWP(16,"edi"));
	&movdqu	(@X[2],&QWP(32,"edi"));
	&movdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&pshufb	(@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&pshufb	(@X[1],$t3);
	&movdqa	($t0,&QWP(0,$K256));
	&pshufb	(@X[2],$t3);
	&movdqa	($t1,&QWP(16,$K256));
	&paddd	($t0,@X[0]);
	&pshufb	(@X[3],$t3);
	&movdqa	($t2,&QWP(32,$K256));
	&paddd	($t1,@X[1]);
	&movdqa	($t3,&QWP(48,$K256));
	&movdqa	(&QWP(32+0,"esp"),$t0);
	&paddd	($t2,@X[2]);
	&movdqa	(&QWP(32+16,"esp"),$t1);
	&paddd	($t3,@X[3]);
	&movdqa	(&QWP(32+32,"esp"),$t2);
	&movdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("ssse3_00_47"));

&set_label("ssse3_00_47",16);
	&add		($K256,64);

sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions

	  eval(shift(@insns));
	&movdqa		($t0,@X[1]);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa		($t3,@X[3]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&palignr	($t0,@X[0],4);		# X[1..4]
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &palignr	($t3,@X[2],4);		# X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&movdqa		($t1,$t0);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa		($t2,$t0);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t0,3);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t2,7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld		($t1,32-18);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t2,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t1);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld		($t1,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &movdqa	($t2,$t3);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t1);		# sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrld		($t3,10);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pshufd	($t3,$t3,0b10000000);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &psrldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &pshufd	($t3,@X[0],0b01010000);	# X[16..17]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &movdqa	($t2,$t3);
	  eval(shift(@insns));			# @
	 &psrld		($t3,10);
	  eval(shift(@insns));
	 &psrlq		($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pshufd	($t3,$t3,0b00001000);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&movdqa		($t2,&QWP(16*$j,$K256));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pslldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd		($t2,@X[0]);
	  eval(shift(@insns));			# @

	foreach (@insns) { eval; }		# remaining instructions

	&movdqa		(&QWP(32+16*$j,"esp"),$t2);
}

sub body_00_15 () {
	(
	'&mov	("ecx",$E);',
	'&ror	($E,25-11);',
	 '&mov	("esi",&off($f));',
	'&xor	($E,"ecx");',
	 '&mov	("edi",&off($g));',
	 '&xor	("esi","edi");',
	'&ror	($E,11-6);',
	 '&and	("esi","ecx");',
	 '&mov	(&off($e),"ecx");',	# save $E, modulo-scheduled
	'&xor	($E,"ecx");',
	 '&xor	("edi","esi");',	# Ch(e,f,g)
	'&ror	($E,6);',		# T = Sigma1(e)
	 '&mov	("ecx",$AH[0]);',
	 '&add	($E,"edi");',		# T += Ch(e,f,g)
	 '&mov	("edi",&off($b));',
	'&mov	("esi",$AH[0]);',

	'&ror	("ecx",22-13);',
	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&xor	("ecx",$AH[0]);',
	 '&xor	($AH[0],"edi");',	# a ^= b, (b^c) in next round
	 '&add	($E,&off($h));',	# T += h
	'&ror	("ecx",13-2);',
	 '&and	($AH[1],$AH[0]);',	# (b^c) &= (a^b)
	'&xor	("ecx","esi");',
	 '&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
	 '&xor	($AH[1],"edi");',	# h = Maj(a,b,c) = Ch(a^b,c,b)
	'&ror	("ecx",2);',		# Sigma0(a)

	 '&add	($AH[1],$E);',		# h += T
	 '&add	($E,&off($d));',	# d += T
	'&add	($AH[1],"ecx");'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}

    for ($i=0,$j=0; $j<4; $j++) {
	&SSSE3_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("ssse3_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&movdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_ssse3"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
						if ($avx) {
&set_label("AVX",32);
						if ($avx>1) {
	&and	("edx",1<<8|1<<3);		# check for BMI2+BMI1
	&cmp	("edx",1<<8|1<<3);
	&je	(&label("AVX_BMI"));
						}
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx"));

&set_label("grand_avx",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp		(&label("avx_00_47"));

&set_label("avx_00_47",16);
	&add		($K256,64);

sub Xupdate_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
	 '&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
	'&vpsrld	($t2,$t0,7);',
	 '&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,3);',
	'&vpslld	($t1,$t0,14);',
	'&vpxor		($t0,$t3,$t2);',
	 '&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,18-7);',
	'&vpxor		($t0,$t0,$t1);',
	'&vpslld	($t1,$t1,25-14);',
	'&vpxor		($t0,$t0,$t2);',
	 '&vpsrld	($t2,$t3,10);',
	'&vpxor		($t0,$t0,$t1);',	# sigma0(X[1..4])
	 '&vpsrlq	($t1,$t3,17);',
	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
	 '&vpxor	($t2,$t2,$t1);',
	 '&vpsrlq	($t3,$t3,19);',
	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[14..15])
	 '&vpshufd	($t3,$t2,0b10000100);',
	'&vpsrldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
	 '&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	 '&vpsrld	($t2,$t3,10);',
	 '&vpsrlq	($t1,$t3,17);',
	 '&vpxor	($t2,$t2,$t1);',
	 '&vpsrlq	($t3,$t3,19);',
	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[16..17])
	 '&vpshufd	($t3,$t2,0b11101000);',
	'&vpslldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
	);
}

local *ror = sub { &shrd(@_[0],@_) };
sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
my $insn;

	foreach (Xupdate_AVX()) {		# 31 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval($insn = shift(@insns));
	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
	}
	&vpaddd		($t2,@X[0],&QWP(16*$j,$K256));
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
}

    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();
						if ($avx>1) {
sub bodyx_00_15 () {			# +10%
	(
	'&rorx	("ecx",$E,6)',
	'&rorx	("esi",$E,11)',
	 '&mov	(&off($e),$E)',		# save $E, modulo-scheduled
	'&rorx	("edi",$E,25)',
	'&xor	("ecx","esi")',
	 '&andn	("esi",$E,&off($g))',
	'&xor	("ecx","edi")',		# Sigma1(e)
	 '&and	($E,&off($f))',
	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	 '&or	($E,"esi")',		# T = Ch(e,f,g)

	'&rorx	("edi",$AH[0],2)',
	'&rorx	("esi",$AH[0],13)',
	 '&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
	'&rorx	("ecx",$AH[0],22)',
	'&xor	("esi","edi")',
	 '&mov	("edi",&off($b))',
	'&xor	("ecx","esi")',		# Sigma0(a)

	 '&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
	 '&add	($E,&off($h))',		# T += h
	 '&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
	 '&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
	 '&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)

	 '&add	("ecx",$E)',		# h += T
	 '&add	($E,&off($d))',		# d += T
	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
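# The BMI path above computes Ch(e,f,g) = (e&f)|(~e&g) directly: andn
# yields (~e)&g in one instruction and rorx rotates without touching
# flags, which is what buys the ~+10% noted above. A worked instance
# (illustrative values): for e=0b1100, f=0b1010, g=0b0101,
# (e&f)|(~e&g) = 0b1000|0b0001 = 0b1001, i.e. f's bits where e is set
# and g's bits where it is clear.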

&set_label("AVX_BMI",32);
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp		(&label("avx_bmi_00_47"));

&set_label("avx_bmi_00_47",16);
	&add		($K256,64);

    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&bodyx_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_bmi_00_47"));

    for ($i=0; $i<16; ) {
	foreach(bodyx_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx_bmi"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();
						}
						}
						}}}
&function_end_B("sha256_block_data_order");

&asm_finish();

close STDOUT;