1 #!/usr/bin/env perl 2 # 3 # ==================================================================== 4 # Written by Andy Polyakov <appro (at] openssl.org> for the OpenSSL 5 # project. The module is, however, dual licensed under OpenSSL and 6 # CRYPTOGAMS licenses depending on where you obtain it. For further 7 # details see http://www.openssl.org/~appro/cryptogams/. 8 # ==================================================================== 9 # 10 # SHA512 block transform for x86. September 2007. 11 # 12 # May 2013. 13 # 14 # Add SSSE3 code path, 20-25% improvement [over original SSE2 code]. 15 # 16 # Performance in clock cycles per processed byte (less is better): 17 # 18 # gcc icc x86 asm SIMD(*) x86_64(**) 19 # Pentium 100 97 61 - - 20 # PIII 75 77 56 - - 21 # P4 116 95 82 34.6 30.8 22 # AMD K8 54 55 36 20.7 9.57 23 # Core2 66 57 40 15.9 9.97 24 # Westmere 70 - 38 12.2 9.58 25 # Sandy Bridge 58 - 35 11.9 11.2 26 # Ivy Bridge 50 - 33 11.5 8.17 27 # Haswell 46 - 29 11.3 7.66 28 # Bulldozer 121 - 50 14.0 13.5 29 # VIA Nano 91 - 52 33 14.7 30 # Atom 126 - 68 48(***) 14.7 31 # 32 # (*) whichever best applicable. 33 # (**) x86_64 assembler performance is presented for reference 34 # purposes, the results are for integer-only code. 35 # (***) paddq is increadibly slow on Atom. 36 # 37 # IALU code-path is optimized for elder Pentiums. On vanilla Pentium 38 # performance improvement over compiler generated code reaches ~60%, 39 # while on PIII - ~35%. On newer -archs improvement varies from 15% 40 # to 50%, but it's less important as they are expected to execute SSE2 41 # code-path, which is commonly ~2-3x faster [than compiler generated 42 # code]. SSE2 code-path is as fast as original sha512-sse2.pl, even 43 # though it does not use 128-bit operations. The latter means that 44 # SSE2-aware kernel is no longer required to execute the code. Another 45 # difference is that new code optimizes amount of writes, but at the 46 # cost of increased data cache "footprint" by 1/2KB. 47 48 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; 49 push(@INC,"${dir}","${dir}../../perlasm"); 50 require "x86asm.pl"; 51 52 &asm_init($ARGV[0],"sha512-586.pl",$ARGV[$#ARGV] eq "386"); 53 54 $sse2=0; 55 for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } 56 57 &external_label("OPENSSL_ia32cap_P") if ($sse2); 58 59 $Tlo=&DWP(0,"esp"); $Thi=&DWP(4,"esp"); 60 $Alo=&DWP(8,"esp"); $Ahi=&DWP(8+4,"esp"); 61 $Blo=&DWP(16,"esp"); $Bhi=&DWP(16+4,"esp"); 62 $Clo=&DWP(24,"esp"); $Chi=&DWP(24+4,"esp"); 63 $Dlo=&DWP(32,"esp"); $Dhi=&DWP(32+4,"esp"); 64 $Elo=&DWP(40,"esp"); $Ehi=&DWP(40+4,"esp"); 65 $Flo=&DWP(48,"esp"); $Fhi=&DWP(48+4,"esp"); 66 $Glo=&DWP(56,"esp"); $Ghi=&DWP(56+4,"esp"); 67 $Hlo=&DWP(64,"esp"); $Hhi=&DWP(64+4,"esp"); 68 $K512="ebp"; 69 70 $Asse2=&QWP(0,"esp"); 71 $Bsse2=&QWP(8,"esp"); 72 $Csse2=&QWP(16,"esp"); 73 $Dsse2=&QWP(24,"esp"); 74 $Esse2=&QWP(32,"esp"); 75 $Fsse2=&QWP(40,"esp"); 76 $Gsse2=&QWP(48,"esp"); 77 $Hsse2=&QWP(56,"esp"); 78 79 $A="mm0"; # B-D and 80 $E="mm4"; # F-H are commonly loaded to respectively mm1-mm3 and 81 # mm5-mm7, but it's done on on-demand basis... 82 $BxC="mm2"; # ... except for B^C 83 84 sub BODY_00_15_sse2 { 85 my $phase=shift; 86 87 #&movq ("mm5",$Fsse2); # load f 88 #&movq ("mm6",$Gsse2); # load g 89 90 &movq ("mm1",$E); # %mm1 is sliding right 91 &pxor ("mm5","mm6"); # f^=g 92 &psrlq ("mm1",14); 93 &movq ($Esse2,$E); # modulo-scheduled save e 94 &pand ("mm5",$E); # f&=e 95 &psllq ($E,23); # $E is sliding left 96 &movq ($A,"mm3") if ($phase<2); 97 &movq (&QWP(8*9,"esp"),"mm7") # save X[i] 98 &movq ("mm3","mm1"); # %mm3 is T1 99 &psrlq ("mm1",4); 100 &pxor ("mm5","mm6"); # Ch(e,f,g) 101 &pxor ("mm3",$E); 102 &psllq ($E,23); 103 &pxor ("mm3","mm1"); 104 &movq ($Asse2,$A); # modulo-scheduled save a 105 &paddq ("mm7","mm5"); # X[i]+=Ch(e,f,g) 106 &pxor ("mm3",$E); 107 &psrlq ("mm1",23); 108 &paddq ("mm7",$Hsse2); # X[i]+=h 109 &pxor ("mm3","mm1"); 110 &psllq ($E,4); 111 &paddq ("mm7",QWP(0,$K512)); # X[i]+=K512[i] 112 &pxor ("mm3",$E); # T1=Sigma1_512(e) 113 114 &movq ($E,$Dsse2); # e = load d, e in next round 115 &paddq ("mm3","mm7"); # T1+=X[i] 116 &movq ("mm5",$A); # %mm5 is sliding right 117 &psrlq ("mm5",28); 118 &paddq ($E,"mm3"); # d += T1 119 &movq ("mm6",$A); # %mm6 is sliding left 120 &movq ("mm7","mm5"); 121 &psllq ("mm6",25); 122 &movq ("mm1",$Bsse2); # load b 123 &psrlq ("mm5",6); 124 &pxor ("mm7","mm6"); 125 &sub ("esp",8); 126 &psllq ("mm6",5); 127 &pxor ("mm7","mm5"); 128 &pxor ($A,"mm1"); # a^b, b^c in next round 129 &psrlq ("mm5",5); 130 &pxor ("mm7","mm6"); 131 &pand ($BxC,$A); # (b^c)&(a^b) 132 &psllq ("mm6",6); 133 &pxor ("mm7","mm5"); 134 &pxor ($BxC,"mm1"); # [h=]Maj(a,b,c) 135 &pxor ("mm6","mm7"); # Sigma0_512(a) 136 &movq ("mm7",&QWP(8*(9+16-1),"esp")) if ($phase!=0); # pre-fetch 137 &movq ("mm5",$Fsse2) if ($phase==0); # load f 138 139 if ($phase>1) { 140 &paddq ($BxC,"mm6"); # h+=Sigma0(a) 141 &add ($K512,8); 142 #&paddq ($BxC,"mm3"); # h+=T1 143 144 ($A,$BxC) = ($BxC,$A); # rotate registers 145 } else { 146 &paddq ("mm3",$BxC); # T1+=Maj(a,b,c) 147 &movq ($BxC,$A); 148 &add ($K512,8); 149 &paddq ("mm3","mm6"); # T1+=Sigma0(a) 150 &movq ("mm6",$Gsse2) if ($phase==0); # load g 151 #&movq ($A,"mm3"); # h=T1 152 } 153 } 154 155 sub BODY_00_15_x86 { 156 #define Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) 157 # LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 158 # HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 159 &mov ("ecx",$Elo); 160 &mov ("edx",$Ehi); 161 &mov ("esi","ecx"); 162 163 &shr ("ecx",9); # lo>>9 164 &mov ("edi","edx"); 165 &shr ("edx",9); # hi>>9 166 &mov ("ebx","ecx"); 167 &shl ("esi",14); # lo<<14 168 &mov ("eax","edx"); 169 &shl ("edi",14); # hi<<14 170 &xor ("ebx","esi"); 171 172 &shr ("ecx",14-9); # lo>>14 173 &xor ("eax","edi"); 174 &shr ("edx",14-9); # hi>>14 175 &xor ("eax","ecx"); 176 &shl ("esi",18-14); # lo<<18 177 &xor ("ebx","edx"); 178 &shl ("edi",18-14); # hi<<18 179 &xor ("ebx","esi"); 180 181 &shr ("ecx",18-14); # lo>>18 182 &xor ("eax","edi"); 183 &shr ("edx",18-14); # hi>>18 184 &xor ("eax","ecx"); 185 &shl ("esi",23-18); # lo<<23 186 &xor ("ebx","edx"); 187 &shl ("edi",23-18); # hi<<23 188 &xor ("eax","esi"); 189 &xor ("ebx","edi"); # T1 = Sigma1(e) 190 191 &mov ("ecx",$Flo); 192 &mov ("edx",$Fhi); 193 &mov ("esi",$Glo); 194 &mov ("edi",$Ghi); 195 &add ("eax",$Hlo); 196 &adc ("ebx",$Hhi); # T1 += h 197 &xor ("ecx","esi"); 198 &xor ("edx","edi"); 199 &and ("ecx",$Elo); 200 &and ("edx",$Ehi); 201 &add ("eax",&DWP(8*(9+15)+0,"esp")); 202 &adc ("ebx",&DWP(8*(9+15)+4,"esp")); # T1 += X[0] 203 &xor ("ecx","esi"); 204 &xor ("edx","edi"); # Ch(e,f,g) = (f^g)&e)^g 205 206 &mov ("esi",&DWP(0,$K512)); 207 &mov ("edi",&DWP(4,$K512)); # K[i] 208 &add ("eax","ecx"); 209 &adc ("ebx","edx"); # T1 += Ch(e,f,g) 210 &mov ("ecx",$Dlo); 211 &mov ("edx",$Dhi); 212 &add ("eax","esi"); 213 &adc ("ebx","edi"); # T1 += K[i] 214 &mov ($Tlo,"eax"); 215 &mov ($Thi,"ebx"); # put T1 away 216 &add ("eax","ecx"); 217 &adc ("ebx","edx"); # d += T1 218 219 #define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) 220 # LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 221 # HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 222 &mov ("ecx",$Alo); 223 &mov ("edx",$Ahi); 224 &mov ($Dlo,"eax"); 225 &mov ($Dhi,"ebx"); 226 &mov ("esi","ecx"); 227 228 &shr ("ecx",2); # lo>>2 229 &mov ("edi","edx"); 230 &shr ("edx",2); # hi>>2 231 &mov ("ebx","ecx"); 232 &shl ("esi",4); # lo<<4 233 &mov ("eax","edx"); 234 &shl ("edi",4); # hi<<4 235 &xor ("ebx","esi"); 236 237 &shr ("ecx",7-2); # lo>>7 238 &xor ("eax","edi"); 239 &shr ("edx",7-2); # hi>>7 240 &xor ("ebx","ecx"); 241 &shl ("esi",25-4); # lo<<25 242 &xor ("eax","edx"); 243 &shl ("edi",25-4); # hi<<25 244 &xor ("eax","esi"); 245 246 &shr ("ecx",28-7); # lo>>28 247 &xor ("ebx","edi"); 248 &shr ("edx",28-7); # hi>>28 249 &xor ("eax","ecx"); 250 &shl ("esi",30-25); # lo<<30 251 &xor ("ebx","edx"); 252 &shl ("edi",30-25); # hi<<30 253 &xor ("eax","esi"); 254 &xor ("ebx","edi"); # Sigma0(a) 255 256 &mov ("ecx",$Alo); 257 &mov ("edx",$Ahi); 258 &mov ("esi",$Blo); 259 &mov ("edi",$Bhi); 260 &add ("eax",$Tlo); 261 &adc ("ebx",$Thi); # T1 = Sigma0(a)+T1 262 &or ("ecx","esi"); 263 &or ("edx","edi"); 264 &and ("ecx",$Clo); 265 &and ("edx",$Chi); 266 &and ("esi",$Alo); 267 &and ("edi",$Ahi); 268 &or ("ecx","esi"); 269 &or ("edx","edi"); # Maj(a,b,c) = ((a|b)&c)|(a&b) 270 271 &add ("eax","ecx"); 272 &adc ("ebx","edx"); # T1 += Maj(a,b,c) 273 &mov ($Tlo,"eax"); 274 &mov ($Thi,"ebx"); 275 276 &mov (&LB("edx"),&BP(0,$K512)); # pre-fetch LSB of *K 277 &sub ("esp",8); 278 &lea ($K512,&DWP(8,$K512)); # K++ 279 } 280 281 282 &function_begin("sha512_block_data_order"); 283 &mov ("esi",wparam(0)); # ctx 284 &mov ("edi",wparam(1)); # inp 285 &mov ("eax",wparam(2)); # num 286 &mov ("ebx","esp"); # saved sp 287 288 &call (&label("pic_point")); # make it PIC! 289 &set_label("pic_point"); 290 &blindpop($K512); 291 &lea ($K512,&DWP(&label("K512")."-".&label("pic_point"),$K512)); 292 293 &sub ("esp",16); 294 &and ("esp",-64); 295 296 &shl ("eax",7); 297 &add ("eax","edi"); 298 &mov (&DWP(0,"esp"),"esi"); # ctx 299 &mov (&DWP(4,"esp"),"edi"); # inp 300 &mov (&DWP(8,"esp"),"eax"); # inp+num*128 301 &mov (&DWP(12,"esp"),"ebx"); # saved sp 302 303 if ($sse2) { 304 &picmeup("edx","OPENSSL_ia32cap_P",$K512,&label("K512")); 305 &mov ("ecx",&DWP(0,"edx")); 306 &test ("ecx",1<<26); 307 &jz (&label("loop_x86")); 308 309 &mov ("edx",&DWP(4,"edx")); 310 311 # load ctx->h[0-7] 312 &movq ($A,&QWP(0,"esi")); 313 &and ("ecx",1<<24); # XMM registers availability 314 &movq ("mm1",&QWP(8,"esi")); 315 &and ("edx",1<<9); # SSSE3 bit 316 &movq ($BxC,&QWP(16,"esi")); 317 &or ("ecx","edx"); 318 &movq ("mm3",&QWP(24,"esi")); 319 &movq ($E,&QWP(32,"esi")); 320 &movq ("mm5",&QWP(40,"esi")); 321 &movq ("mm6",&QWP(48,"esi")); 322 &movq ("mm7",&QWP(56,"esi")); 323 &cmp ("ecx",1<<24|1<<9); 324 &je (&label("SSSE3")); 325 &sub ("esp",8*10); 326 &jmp (&label("loop_sse2")); 327 328 &set_label("loop_sse2",16); 329 #&movq ($Asse2,$A); 330 &movq ($Bsse2,"mm1"); 331 &movq ($Csse2,$BxC); 332 &movq ($Dsse2,"mm3"); 333 #&movq ($Esse2,$E); 334 &movq ($Fsse2,"mm5"); 335 &movq ($Gsse2,"mm6"); 336 &pxor ($BxC,"mm1"); # magic 337 &movq ($Hsse2,"mm7"); 338 &movq ("mm3",$A); # magic 339 340 &mov ("eax",&DWP(0,"edi")); 341 &mov ("ebx",&DWP(4,"edi")); 342 &add ("edi",8); 343 &mov ("edx",15); # counter 344 &bswap ("eax"); 345 &bswap ("ebx"); 346 &jmp (&label("00_14_sse2")); 347 348 &set_label("00_14_sse2",16); 349 &movd ("mm1","eax"); 350 &mov ("eax",&DWP(0,"edi")); 351 &movd ("mm7","ebx"); 352 &mov ("ebx",&DWP(4,"edi")); 353 &add ("edi",8); 354 &bswap ("eax"); 355 &bswap ("ebx"); 356 &punpckldq("mm7","mm1"); 357 358 &BODY_00_15_sse2(); 359 360 &dec ("edx"); 361 &jnz (&label("00_14_sse2")); 362 363 &movd ("mm1","eax"); 364 &movd ("mm7","ebx"); 365 &punpckldq("mm7","mm1"); 366 367 &BODY_00_15_sse2(1); 368 369 &pxor ($A,$A); # A is in %mm3 370 &mov ("edx",32); # counter 371 &jmp (&label("16_79_sse2")); 372 373 &set_label("16_79_sse2",16); 374 for ($j=0;$j<2;$j++) { # 2x unroll 375 #&movq ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15 376 &movq ("mm5",&QWP(8*(9+16-14),"esp")); 377 &movq ("mm1","mm7"); 378 &psrlq ("mm7",1); 379 &movq ("mm6","mm5"); 380 &psrlq ("mm5",6); 381 &psllq ("mm1",56); 382 &paddq ($A,"mm3"); # from BODY_00_15 383 &movq ("mm3","mm7"); 384 &psrlq ("mm7",7-1); 385 &pxor ("mm3","mm1"); 386 &psllq ("mm1",63-56); 387 &pxor ("mm3","mm7"); 388 &psrlq ("mm7",8-7); 389 &pxor ("mm3","mm1"); 390 &movq ("mm1","mm5"); 391 &psrlq ("mm5",19-6); 392 &pxor ("mm7","mm3"); # sigma0 393 394 &psllq ("mm6",3); 395 &pxor ("mm1","mm5"); 396 &paddq ("mm7",&QWP(8*(9+16),"esp")); 397 &pxor ("mm1","mm6"); 398 &psrlq ("mm5",61-19); 399 &paddq ("mm7",&QWP(8*(9+16-9),"esp")); 400 &pxor ("mm1","mm5"); 401 &psllq ("mm6",45-3); 402 &movq ("mm5",$Fsse2); # load f 403 &pxor ("mm1","mm6"); # sigma1 404 &movq ("mm6",$Gsse2); # load g 405 406 &paddq ("mm7","mm1"); # X[i] 407 #&movq (&QWP(8*9,"esp"),"mm7"); # moved to BODY_00_15 408 409 &BODY_00_15_sse2(2); 410 } 411 &dec ("edx"); 412 &jnz (&label("16_79_sse2")); 413 414 #&movq ($A,$Asse2); 415 &paddq ($A,"mm3"); # from BODY_00_15 416 &movq ("mm1",$Bsse2); 417 #&movq ($BxC,$Csse2); 418 &movq ("mm3",$Dsse2); 419 #&movq ($E,$Esse2); 420 &movq ("mm5",$Fsse2); 421 &movq ("mm6",$Gsse2); 422 &movq ("mm7",$Hsse2); 423 424 &pxor ($BxC,"mm1"); # de-magic 425 &paddq ($A,&QWP(0,"esi")); 426 &paddq ("mm1",&QWP(8,"esi")); 427 &paddq ($BxC,&QWP(16,"esi")); 428 &paddq ("mm3",&QWP(24,"esi")); 429 &paddq ($E,&QWP(32,"esi")); 430 &paddq ("mm5",&QWP(40,"esi")); 431 &paddq ("mm6",&QWP(48,"esi")); 432 &paddq ("mm7",&QWP(56,"esi")); 433 434 &mov ("eax",8*80); 435 &movq (&QWP(0,"esi"),$A); 436 &movq (&QWP(8,"esi"),"mm1"); 437 &movq (&QWP(16,"esi"),$BxC); 438 &movq (&QWP(24,"esi"),"mm3"); 439 &movq (&QWP(32,"esi"),$E); 440 &movq (&QWP(40,"esi"),"mm5"); 441 &movq (&QWP(48,"esi"),"mm6"); 442 &movq (&QWP(56,"esi"),"mm7"); 443 444 &lea ("esp",&DWP(0,"esp","eax")); # destroy frame 445 &sub ($K512,"eax"); # rewind K 446 447 &cmp ("edi",&DWP(8*10+8,"esp")); # are we done yet? 448 &jb (&label("loop_sse2")); 449 450 &mov ("esp",&DWP(8*10+12,"esp")); # restore sp 451 &emms (); 452 &function_end_A(); 453 454 &set_label("SSSE3",32); 455 { my ($cnt,$frame)=("ecx","edx"); 456 my @X=map("xmm$_",(0..7)); 457 my $j; 458 my $i=0; 459 460 &lea ($frame,&DWP(-64,"esp")); 461 &sub ("esp",256); 462 463 # fixed stack frame layout 464 # 465 # +0 A B C D E F G H # backing store 466 # +64 X[0]+K[i] .. X[15]+K[i] # XMM->MM xfer area 467 # +192 # XMM off-load ring buffer 468 # +256 # saved parameters 469 470 &movdqa (@X[1],&QWP(80*8,$K512)); # byte swap mask 471 &movdqu (@X[0],&QWP(0,"edi")); 472 &pshufb (@X[0],@X[1]); 473 for ($j=0;$j<8;$j++) { 474 &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4); # off-load 475 &movdqa (@X[3],&QWP(16*($j%8),$K512)); 476 &movdqa (@X[2],@X[1]) if ($j<7); # perpetuate byte swap mask 477 &movdqu (@X[1],&QWP(16*($j+1),"edi")) if ($j<7); # next input 478 &movdqa (@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7);# restore @X[0] 479 &paddq (@X[3],@X[0]); 480 &pshufb (@X[1],@X[2]) if ($j<7); 481 &movdqa (&QWP(16*($j%8)-128,$frame),@X[3]); # xfer X[i]+K[i] 482 483 push(@X,shift(@X)); # rotate(@X) 484 } 485 #&jmp (&label("loop_ssse3")); 486 &nop (); 487 488 &set_label("loop_ssse3",32); 489 &movdqa (@X[2],&QWP(16*(($j+1)%4),$frame)); # pre-restore @X[1] 490 &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]); # off-load @X[3] 491 &lea ($K512,&DWP(16*8,$K512)); 492 493 #&movq ($Asse2,$A); # off-load A-H 494 &movq ($Bsse2,"mm1"); 495 &mov ("ebx","edi"); 496 &movq ($Csse2,$BxC); 497 &lea ("edi",&DWP(128,"edi")); # advance input 498 &movq ($Dsse2,"mm3"); 499 &cmp ("edi","eax"); 500 #&movq ($Esse2,$E); 501 &movq ($Fsse2,"mm5"); 502 &cmovb ("ebx","edi"); 503 &movq ($Gsse2,"mm6"); 504 &mov ("ecx",4); # loop counter 505 &pxor ($BxC,"mm1"); # magic 506 &movq ($Hsse2,"mm7"); 507 &pxor ("mm3","mm3"); # magic 508 509 &jmp (&label("00_47_ssse3")); 510 511 sub BODY_00_15_ssse3 { # "phase-less" copy of BODY_00_15_sse2 512 ( 513 '&movq ("mm1",$E)', # %mm1 is sliding right 514 '&movq ("mm7",&QWP(((-8*$i)%128)-128,$frame))',# X[i]+K[i] 515 '&pxor ("mm5","mm6")', # f^=g 516 '&psrlq ("mm1",14)', 517 '&movq (&QWP(8*($i+4)%64,"esp"),$E)', # modulo-scheduled save e 518 '&pand ("mm5",$E)', # f&=e 519 '&psllq ($E,23)', # $E is sliding left 520 '&paddq ($A,"mm3")', # [h+=Maj(a,b,c)] 521 '&movq ("mm3","mm1")', # %mm3 is T1 522 '&psrlq("mm1",4)', 523 '&pxor ("mm5","mm6")', # Ch(e,f,g) 524 '&pxor ("mm3",$E)', 525 '&psllq($E,23)', 526 '&pxor ("mm3","mm1")', 527 '&movq (&QWP(8*$i%64,"esp"),$A)', # modulo-scheduled save a 528 '&paddq("mm7","mm5")', # X[i]+=Ch(e,f,g) 529 '&pxor ("mm3",$E)', 530 '&psrlq("mm1",23)', 531 '&paddq("mm7",&QWP(8*($i+7)%64,"esp"))', # X[i]+=h 532 '&pxor ("mm3","mm1")', 533 '&psllq($E,4)', 534 '&pxor ("mm3",$E)', # T1=Sigma1_512(e) 535 536 '&movq ($E,&QWP(8*($i+3)%64,"esp"))', # e = load d, e in next round 537 '&paddq ("mm3","mm7")', # T1+=X[i] 538 '&movq ("mm5",$A)', # %mm5 is sliding right 539 '&psrlq("mm5",28)', 540 '&paddq ($E,"mm3")', # d += T1 541 '&movq ("mm6",$A)', # %mm6 is sliding left 542 '&movq ("mm7","mm5")', 543 '&psllq("mm6",25)', 544 '&movq ("mm1",&QWP(8*($i+1)%64,"esp"))', # load b 545 '&psrlq("mm5",6)', 546 '&pxor ("mm7","mm6")', 547 '&psllq("mm6",5)', 548 '&pxor ("mm7","mm5")', 549 '&pxor ($A,"mm1")', # a^b, b^c in next round 550 '&psrlq("mm5",5)', 551 '&pxor ("mm7","mm6")', 552 '&pand ($BxC,$A)', # (b^c)&(a^b) 553 '&psllq("mm6",6)', 554 '&pxor ("mm7","mm5")', 555 '&pxor ($BxC,"mm1")', # [h=]Maj(a,b,c) 556 '&pxor ("mm6","mm7")', # Sigma0_512(a) 557 '&movq ("mm5",&QWP(8*($i+5-1)%64,"esp"))', # pre-load f 558 '&paddq ($BxC,"mm6")', # h+=Sigma0(a) 559 '&movq ("mm6",&QWP(8*($i+6-1)%64,"esp"))', # pre-load g 560 561 '($A,$BxC) = ($BxC,$A); $i--;' 562 ); 563 } 564 565 &set_label("00_47_ssse3",32); 566 567 for(;$j<16;$j++) { 568 my ($t0,$t2,$t1)=@X[2..4]; 569 my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3()); 570 571 &movdqa ($t2,@X[5]); 572 &movdqa (@X[1],$t0); # restore @X[1] 573 &palignr ($t0,@X[0],8); # X[1..2] 574 &movdqa (&QWP(16*($j%4),$frame),@X[4]); # off-load @X[4] 575 &palignr ($t2,@X[4],8); # X[9..10] 576 577 &movdqa ($t1,$t0); 578 &psrlq ($t0,7); 579 &paddq (@X[0],$t2); # X[0..1] += X[9..10] 580 &movdqa ($t2,$t1); 581 &psrlq ($t1,1); 582 &psllq ($t2,64-8); 583 &pxor ($t0,$t1); 584 &psrlq ($t1,8-1); 585 &pxor ($t0,$t2); 586 &psllq ($t2,8-1); 587 &pxor ($t0,$t1); 588 &movdqa ($t1,@X[7]); 589 &pxor ($t0,$t2); # sigma0(X[1..2]) 590 &movdqa ($t2,@X[7]); 591 &psrlq ($t1,6); 592 &paddq (@X[0],$t0); # X[0..1] += sigma0(X[1..2]) 593 594 &movdqa ($t0,@X[7]); 595 &psrlq ($t2,19); 596 &psllq ($t0,64-61); 597 &pxor ($t1,$t2); 598 &psrlq ($t2,61-19); 599 &pxor ($t1,$t0); 600 &psllq ($t0,61-19); 601 &pxor ($t1,$t2); 602 &movdqa ($t2,&QWP(16*(($j+2)%4),$frame));# pre-restore @X[1] 603 &pxor ($t1,$t0); # sigma0(X[1..2]) 604 &movdqa ($t0,&QWP(16*($j%8),$K512)); 605 eval(shift(@insns)); 606 &paddq (@X[0],$t1); # X[0..1] += sigma0(X[14..15]) 607 eval(shift(@insns)); 608 eval(shift(@insns)); 609 eval(shift(@insns)); 610 eval(shift(@insns)); 611 &paddq ($t0,@X[0]); 612 foreach(@insns) { eval; } 613 &movdqa (&QWP(16*($j%8)-128,$frame),$t0);# xfer X[i]+K[i] 614 615 push(@X,shift(@X)); # rotate(@X) 616 } 617 &lea ($K512,&DWP(16*8,$K512)); 618 &dec ("ecx"); 619 &jnz (&label("00_47_ssse3")); 620 621 &movdqa (@X[1],&QWP(0,$K512)); # byte swap mask 622 &lea ($K512,&DWP(-80*8,$K512)); # rewind 623 &movdqu (@X[0],&QWP(0,"ebx")); 624 &pshufb (@X[0],@X[1]); 625 626 for ($j=0;$j<8;$j++) { # load next or same block 627 my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3()); 628 629 &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4); # off-load 630 &movdqa (@X[3],&QWP(16*($j%8),$K512)); 631 &movdqa (@X[2],@X[1]) if ($j<7); # perpetuate byte swap mask 632 &movdqu (@X[1],&QWP(16*($j+1),"ebx")) if ($j<7); # next input 633 &movdqa (@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7);# restore @X[0] 634 &paddq (@X[3],@X[0]); 635 &pshufb (@X[1],@X[2]) if ($j<7); 636 foreach(@insns) { eval; } 637 &movdqa (&QWP(16*($j%8)-128,$frame),@X[3]);# xfer X[i]+K[i] 638 639 push(@X,shift(@X)); # rotate(@X) 640 } 641 642 #&movq ($A,$Asse2); # load A-H 643 &movq ("mm1",$Bsse2); 644 &paddq ($A,"mm3"); # from BODY_00_15 645 #&movq ($BxC,$Csse2); 646 &movq ("mm3",$Dsse2); 647 #&movq ($E,$Esse2); 648 #&movq ("mm5",$Fsse2); 649 #&movq ("mm6",$Gsse2); 650 &movq ("mm7",$Hsse2); 651 652 &pxor ($BxC,"mm1"); # de-magic 653 &paddq ($A,&QWP(0,"esi")); 654 &paddq ("mm1",&QWP(8,"esi")); 655 &paddq ($BxC,&QWP(16,"esi")); 656 &paddq ("mm3",&QWP(24,"esi")); 657 &paddq ($E,&QWP(32,"esi")); 658 &paddq ("mm5",&QWP(40,"esi")); 659 &paddq ("mm6",&QWP(48,"esi")); 660 &paddq ("mm7",&QWP(56,"esi")); 661 662 &movq (&QWP(0,"esi"),$A); 663 &movq (&QWP(8,"esi"),"mm1"); 664 &movq (&QWP(16,"esi"),$BxC); 665 &movq (&QWP(24,"esi"),"mm3"); 666 &movq (&QWP(32,"esi"),$E); 667 &movq (&QWP(40,"esi"),"mm5"); 668 &movq (&QWP(48,"esi"),"mm6"); 669 &movq (&QWP(56,"esi"),"mm7"); 670 671 &cmp ("edi","eax") # are we done yet? 672 &jb (&label("loop_ssse3")); 673 674 &mov ("esp",&DWP(64+12,$frame)); # restore sp 675 &emms (); 676 } 677 &function_end_A(); 678 } 679 &set_label("loop_x86",16); 680 # copy input block to stack reversing byte and qword order 681 for ($i=0;$i<8;$i++) { 682 &mov ("eax",&DWP($i*16+0,"edi")); 683 &mov ("ebx",&DWP($i*16+4,"edi")); 684 &mov ("ecx",&DWP($i*16+8,"edi")); 685 &mov ("edx",&DWP($i*16+12,"edi")); 686 &bswap ("eax"); 687 &bswap ("ebx"); 688 &bswap ("ecx"); 689 &bswap ("edx"); 690 &push ("eax"); 691 &push ("ebx"); 692 &push ("ecx"); 693 &push ("edx"); 694 } 695 &add ("edi",128); 696 &sub ("esp",9*8); # place for T,A,B,C,D,E,F,G,H 697 &mov (&DWP(8*(9+16)+4,"esp"),"edi"); 698 699 # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack 700 &lea ("edi",&DWP(8,"esp")); 701 &mov ("ecx",16); 702 &data_word(0xA5F3F689); # rep movsd 703 704 &set_label("00_15_x86",16); 705 &BODY_00_15_x86(); 706 707 &cmp (&LB("edx"),0x94); 708 &jne (&label("00_15_x86")); 709 710 &set_label("16_79_x86",16); 711 #define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) 712 # LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 713 # HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 714 &mov ("ecx",&DWP(8*(9+15+16-1)+0,"esp")); 715 &mov ("edx",&DWP(8*(9+15+16-1)+4,"esp")); 716 &mov ("esi","ecx"); 717 718 &shr ("ecx",1); # lo>>1 719 &mov ("edi","edx"); 720 &shr ("edx",1); # hi>>1 721 &mov ("eax","ecx"); 722 &shl ("esi",24); # lo<<24 723 &mov ("ebx","edx"); 724 &shl ("edi",24); # hi<<24 725 &xor ("ebx","esi"); 726 727 &shr ("ecx",7-1); # lo>>7 728 &xor ("eax","edi"); 729 &shr ("edx",7-1); # hi>>7 730 &xor ("eax","ecx"); 731 &shl ("esi",31-24); # lo<<31 732 &xor ("ebx","edx"); 733 &shl ("edi",25-24); # hi<<25 734 &xor ("ebx","esi"); 735 736 &shr ("ecx",8-7); # lo>>8 737 &xor ("eax","edi"); 738 &shr ("edx",8-7); # hi>>8 739 &xor ("eax","ecx"); 740 &shl ("edi",31-25); # hi<<31 741 &xor ("ebx","edx"); 742 &xor ("eax","edi"); # T1 = sigma0(X[-15]) 743 744 &mov (&DWP(0,"esp"),"eax"); 745 &mov (&DWP(4,"esp"),"ebx"); # put T1 away 746 747 #define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) 748 # LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 749 # HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 750 &mov ("ecx",&DWP(8*(9+15+16-14)+0,"esp")); 751 &mov ("edx",&DWP(8*(9+15+16-14)+4,"esp")); 752 &mov ("esi","ecx"); 753 754 &shr ("ecx",6); # lo>>6 755 &mov ("edi","edx"); 756 &shr ("edx",6); # hi>>6 757 &mov ("eax","ecx"); 758 &shl ("esi",3); # lo<<3 759 &mov ("ebx","edx"); 760 &shl ("edi",3); # hi<<3 761 &xor ("eax","esi"); 762 763 &shr ("ecx",19-6); # lo>>19 764 &xor ("ebx","edi"); 765 &shr ("edx",19-6); # hi>>19 766 &xor ("eax","ecx"); 767 &shl ("esi",13-3); # lo<<13 768 &xor ("ebx","edx"); 769 &shl ("edi",13-3); # hi<<13 770 &xor ("ebx","esi"); 771 772 &shr ("ecx",29-19); # lo>>29 773 &xor ("eax","edi"); 774 &shr ("edx",29-19); # hi>>29 775 &xor ("ebx","ecx"); 776 &shl ("edi",26-13); # hi<<26 777 &xor ("eax","edx"); 778 &xor ("eax","edi"); # sigma1(X[-2]) 779 780 &mov ("ecx",&DWP(8*(9+15+16)+0,"esp")); 781 &mov ("edx",&DWP(8*(9+15+16)+4,"esp")); 782 &add ("eax",&DWP(0,"esp")); 783 &adc ("ebx",&DWP(4,"esp")); # T1 = sigma1(X[-2])+T1 784 &mov ("esi",&DWP(8*(9+15+16-9)+0,"esp")); 785 &mov ("edi",&DWP(8*(9+15+16-9)+4,"esp")); 786 &add ("eax","ecx"); 787 &adc ("ebx","edx"); # T1 += X[-16] 788 &add ("eax","esi"); 789 &adc ("ebx","edi"); # T1 += X[-7] 790 &mov (&DWP(8*(9+15)+0,"esp"),"eax"); 791 &mov (&DWP(8*(9+15)+4,"esp"),"ebx"); # save X[0] 792 793 &BODY_00_15_x86(); 794 795 &cmp (&LB("edx"),0x17); 796 &jne (&label("16_79_x86")); 797 798 &mov ("esi",&DWP(8*(9+16+80)+0,"esp"));# ctx 799 &mov ("edi",&DWP(8*(9+16+80)+4,"esp"));# inp 800 for($i=0;$i<4;$i++) { 801 &mov ("eax",&DWP($i*16+0,"esi")); 802 &mov ("ebx",&DWP($i*16+4,"esi")); 803 &mov ("ecx",&DWP($i*16+8,"esi")); 804 &mov ("edx",&DWP($i*16+12,"esi")); 805 &add ("eax",&DWP(8+($i*16)+0,"esp")); 806 &adc ("ebx",&DWP(8+($i*16)+4,"esp")); 807 &mov (&DWP($i*16+0,"esi"),"eax"); 808 &mov (&DWP($i*16+4,"esi"),"ebx"); 809 &add ("ecx",&DWP(8+($i*16)+8,"esp")); 810 &adc ("edx",&DWP(8+($i*16)+12,"esp")); 811 &mov (&DWP($i*16+8,"esi"),"ecx"); 812 &mov (&DWP($i*16+12,"esi"),"edx"); 813 } 814 &add ("esp",8*(9+16+80)); # destroy frame 815 &sub ($K512,8*80); # rewind K 816 817 &cmp ("edi",&DWP(8,"esp")); # are we done yet? 818 &jb (&label("loop_x86")); 819 820 &mov ("esp",&DWP(12,"esp")); # restore sp 821 &function_end_A(); 822 823 &set_label("K512",64); # Yes! I keep it in the code segment! 824 &data_word(0xd728ae22,0x428a2f98); # u64 825 &data_word(0x23ef65cd,0x71374491); # u64 826 &data_word(0xec4d3b2f,0xb5c0fbcf); # u64 827 &data_word(0x8189dbbc,0xe9b5dba5); # u64 828 &data_word(0xf348b538,0x3956c25b); # u64 829 &data_word(0xb605d019,0x59f111f1); # u64 830 &data_word(0xaf194f9b,0x923f82a4); # u64 831 &data_word(0xda6d8118,0xab1c5ed5); # u64 832 &data_word(0xa3030242,0xd807aa98); # u64 833 &data_word(0x45706fbe,0x12835b01); # u64 834 &data_word(0x4ee4b28c,0x243185be); # u64 835 &data_word(0xd5ffb4e2,0x550c7dc3); # u64 836 &data_word(0xf27b896f,0x72be5d74); # u64 837 &data_word(0x3b1696b1,0x80deb1fe); # u64 838 &data_word(0x25c71235,0x9bdc06a7); # u64 839 &data_word(0xcf692694,0xc19bf174); # u64 840 &data_word(0x9ef14ad2,0xe49b69c1); # u64 841 &data_word(0x384f25e3,0xefbe4786); # u64 842 &data_word(0x8b8cd5b5,0x0fc19dc6); # u64 843 &data_word(0x77ac9c65,0x240ca1cc); # u64 844 &data_word(0x592b0275,0x2de92c6f); # u64 845 &data_word(0x6ea6e483,0x4a7484aa); # u64 846 &data_word(0xbd41fbd4,0x5cb0a9dc); # u64 847 &data_word(0x831153b5,0x76f988da); # u64 848 &data_word(0xee66dfab,0x983e5152); # u64 849 &data_word(0x2db43210,0xa831c66d); # u64 850 &data_word(0x98fb213f,0xb00327c8); # u64 851 &data_word(0xbeef0ee4,0xbf597fc7); # u64 852 &data_word(0x3da88fc2,0xc6e00bf3); # u64 853 &data_word(0x930aa725,0xd5a79147); # u64 854 &data_word(0xe003826f,0x06ca6351); # u64 855 &data_word(0x0a0e6e70,0x14292967); # u64 856 &data_word(0x46d22ffc,0x27b70a85); # u64 857 &data_word(0x5c26c926,0x2e1b2138); # u64 858 &data_word(0x5ac42aed,0x4d2c6dfc); # u64 859 &data_word(0x9d95b3df,0x53380d13); # u64 860 &data_word(0x8baf63de,0x650a7354); # u64 861 &data_word(0x3c77b2a8,0x766a0abb); # u64 862 &data_word(0x47edaee6,0x81c2c92e); # u64 863 &data_word(0x1482353b,0x92722c85); # u64 864 &data_word(0x4cf10364,0xa2bfe8a1); # u64 865 &data_word(0xbc423001,0xa81a664b); # u64 866 &data_word(0xd0f89791,0xc24b8b70); # u64 867 &data_word(0x0654be30,0xc76c51a3); # u64 868 &data_word(0xd6ef5218,0xd192e819); # u64 869 &data_word(0x5565a910,0xd6990624); # u64 870 &data_word(0x5771202a,0xf40e3585); # u64 871 &data_word(0x32bbd1b8,0x106aa070); # u64 872 &data_word(0xb8d2d0c8,0x19a4c116); # u64 873 &data_word(0x5141ab53,0x1e376c08); # u64 874 &data_word(0xdf8eeb99,0x2748774c); # u64 875 &data_word(0xe19b48a8,0x34b0bcb5); # u64 876 &data_word(0xc5c95a63,0x391c0cb3); # u64 877 &data_word(0xe3418acb,0x4ed8aa4a); # u64 878 &data_word(0x7763e373,0x5b9cca4f); # u64 879 &data_word(0xd6b2b8a3,0x682e6ff3); # u64 880 &data_word(0x5defb2fc,0x748f82ee); # u64 881 &data_word(0x43172f60,0x78a5636f); # u64 882 &data_word(0xa1f0ab72,0x84c87814); # u64 883 &data_word(0x1a6439ec,0x8cc70208); # u64 884 &data_word(0x23631e28,0x90befffa); # u64 885 &data_word(0xde82bde9,0xa4506ceb); # u64 886 &data_word(0xb2c67915,0xbef9a3f7); # u64 887 &data_word(0xe372532b,0xc67178f2); # u64 888 &data_word(0xea26619c,0xca273ece); # u64 889 &data_word(0x21c0c207,0xd186b8c7); # u64 890 &data_word(0xcde0eb1e,0xeada7dd6); # u64 891 &data_word(0xee6ed178,0xf57d4f7f); # u64 892 &data_word(0x72176fba,0x06f067aa); # u64 893 &data_word(0xa2c898a6,0x0a637dc5); # u64 894 &data_word(0xbef90dae,0x113f9804); # u64 895 &data_word(0x131c471b,0x1b710b35); # u64 896 &data_word(0x23047d84,0x28db77f5); # u64 897 &data_word(0x40c72493,0x32caab7b); # u64 898 &data_word(0x15c9bebc,0x3c9ebe0a); # u64 899 &data_word(0x9c100d4c,0x431d67c4); # u64 900 &data_word(0xcb3e42b6,0x4cc5d4be); # u64 901 &data_word(0xfc657e2a,0x597f299c); # u64 902 &data_word(0x3ad6faec,0x5fcb6fab); # u64 903 &data_word(0x4a475817,0x6c44198c); # u64 904 905 &data_word(0x04050607,0x00010203); # byte swap 906 &data_word(0x0c0d0e0f,0x08090a0b); # mask 907 &function_end_B("sha512_block_data_order"); 908 &asciz("SHA512 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>"); 909 910 &asm_finish(); 911