#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# March, June 2010
#
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+128 bytes shared
# table]. The GHASH function features a so-called "528B" variant
# utilizing an additional 256+16 bytes of per-key storage [+512 bytes
# shared table]. Performance results are for this streamed GHASH
# subroutine and are expressed in cycles per processed byte, less is
# better:
#
#		gcc 3.4.x(*)	assembler
#
# P4		28.6		14.0		+100%
# Opteron	19.3		7.7		+150%
# Core2		17.8		8.1(**)		+120%
# Atom		31.6		16.8		+88%
# VIA Nano	21.8		10.1		+115%
#
# (*)	comparison is not completely fair, because C results are
#	for the vanilla "256B" implementation, while assembler results
#	are for "528B";-)
# (**)	it's a mystery [to me] why the Core2 result is not the same
#	as Opteron's;

# May 2010
#
# Add a PCLMULQDQ version performing at 2.02 cycles per processed
# byte. See ghash-x86.pl for background information and details about
# coding techniques.
#
# Special thanks to David Woodhouse <dwmw2@infradead.org> for
# providing access to a Westmere-based system on behalf of Intel
# Open Source Technology Centre.

# December 2012
#
# Overhaul: aggregate Karatsuba post-processing, improve ILP in
# reduction_alg9, increase reduction aggregate factor to 4x. As for
# the latter: ghash-x86.pl argues that it makes less sense to
# increase the aggregate factor there. Then why increase it here?
# The critical path consists of 3 independent pclmulqdq instructions,
# Karatsuba post-processing and reduction. "On top" of this we lay
# down aggregated multiplication operations, triplets of independent
# pclmulqdq's. As the issue rate for pclmulqdq is limited, it makes
# less sense to aggregate more multiplications than it takes to
# perform the remaining non-multiplication operations. 2x is the
# near-optimal coefficient for contemporary Intel CPUs (hence the
# modest improvement coefficients below), but not for Bulldozer,
# whose logical SIMD operations are twice as slow as Intel's, which
# makes its critical path longer. A CPU with a higher pclmulqdq
# issue rate would also benefit from a higher aggregate factor...
#
# Westmere	1.78(+13%)
# Sandy Bridge	1.80(+8%)
# Ivy Bridge	1.80(+7%)
# Haswell	0.55(+93%) (if system doesn't support AVX)
# Broadwell	0.45(+110%)(if system doesn't support AVX)
# Bulldozer	1.49(+27%)
# Silvermont	2.88(+13%)

# March 2013
#
# ... the 8x aggregate factor AVX code path is using the reduction
# algorithm suggested by Shay Gueron[1]. Even though contemporary
# AVX-capable CPUs such as Sandy and Ivy Bridge can execute it, the
# code performs sub-optimally on them in comparison to the
# above-mentioned version. But thanks to Ilya Albrekht and Max
# Locktyukhin of Intel Corp. we know that it performs at 0.41 cycles
# per byte on Haswell, and at 0.29 on Broadwell.
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
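
# For reference, everything below implements the GHASH multiplication
# in GF(2^128) specified in NIST SP 800-38D. The following bit-serial
# model is a hedged illustration only (it assumes Math::BigInt and
# 32-hex-digit operands) and is never called by this generator:

use Math::BigInt;

sub gf128_mul_ref {
my ($Xhex,$Yhex)=@_;			# operands as hex strings
my $X = Math::BigInt->from_hex($Xhex);
my $V = Math::BigInt->from_hex($Yhex);
my $Z = Math::BigInt->bzero();
my $R = Math::BigInt->from_hex("e1")->blsft(120);  # 11100001||0^120
	for my $i (reverse 0..127) {	# GCM bit 0 is integer bit 127
	    $Z->bxor($V) if $X->copy()->brsft($i)->is_odd();
	    my $carry = $V->is_odd();	# coefficient of x^127
	    $V->brsft(1);
	    $V->bxor($R) if $carry;	# fold modulo the polynomial
	}
my $hex = substr($Z->as_hex(),2);
return "0" x (32-length($hex)) . $hex;
}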
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

$do4xaggr=1;

# common register layout
$nlo="%rax";
$nhi="%rbx";
$Zlo="%r8";
$Zhi="%r9";
$tmp="%r10";
$rem_4bit = "%r11";

$Xi="%rdi";
$Htbl="%rsi";

# per-function register layout
$cnt="%rcx";
$rem="%rdx";

sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/	or
			$r =~ s/%[er]([sd]i)/%\1l/	or
			$r =~ s/%[er](bp)/%\1l/		or
			$r =~ s/%(r[0-9]+)[d]?/%\1b/;   $r; }

sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}

{ my $N;
  sub loop() {
  my $inp = shift;

	$N++;
$code.=<<___;
	xor	$nlo,$nlo
	xor	$nhi,$nhi
	mov	`&LB("$Zlo")`,`&LB("$nlo")`
	mov	`&LB("$Zlo")`,`&LB("$nhi")`
	shl	\$4,`&LB("$nlo")`
	mov	\$14,$cnt
	mov	8($Htbl,$nlo),$Zlo
	mov	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	mov	$Zlo,$rem
	jmp	.Loop$N

.align	16
.Loop$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	mov	($inp,$cnt),`&LB("$nlo")`
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	mov	`&LB("$nlo")`,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	shl	\$4,`&LB("$nlo")`
	xor	$tmp,$Zlo
	dec	$cnt
	js	.Lbreak$N

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo
	jmp	.Loop$N

.align	16
.Lbreak$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	xor	$tmp,$Zlo
	xor	($rem_4bit,$rem,8),$Zhi

	bswap	$Zlo
	bswap	$Zhi
___
}}
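
# The .Lrem_4bit and .Lrem_8bit correction tables at the bottom of
# this file fold the bits shifted out of Z back in modulo the GHASH
# polynomial. A hedged generator sketch (assumes a 64-bit perl, not
# used by this script): entry n is the carry-less product n*0x1c2;
# .Lrem_8bit stores it as-is, .Lrem_4bit pre-shifted into the top of
# a 64-bit word.

sub rem_table_ref {
my ($bits)=@_;				# 4 or 8
my @tbl;
	for my $n (0..(1<<$bits)-1) {
	    my $r = 0;
	    for my $b (0..$bits-1) {
		$r ^= 0x1c2<<$b if ($n>>$b)&1;
	    }
	    push(@tbl, $bits==4 ? $r<<52 : $r);
	}
return @tbl;				# e.g. last 8-bit entry is 0xbebe
}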
$code=<<___;
.text
.extern	OPENSSL_ia32cap_P

.globl	gcm_gmult_4bit
.type	gcm_gmult_4bit,\@function,2
.align	16
gcm_gmult_4bit:
	push	%rbx
	push	%rbp		# %rbp and %r12 are pushed exclusively in
	push	%r12		# order to reuse Win64 exception handler...
.Lgmult_prologue:

	movzb	15($Xi),$Zlo
	lea	.Lrem_4bit(%rip),$rem_4bit
___
	&loop	($Xi);
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	mov	16(%rsp),%rbx
	lea	24(%rsp),%rsp
.Lgmult_epilogue:
	ret
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
___

# per-function register layout
$inp="%rdx";
$len="%rcx";
$rem_8bit=$rem_4bit;

$code.=<<___;
.globl	gcm_ghash_4bit
.type	gcm_ghash_4bit,\@function,4
.align	16
gcm_ghash_4bit:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	sub	\$280,%rsp
.Lghash_prologue:
	mov	$inp,%r14		# reassign couple of args
	mov	$len,%r15
___
{ my $inp="%r14";
  my $dat="%edx";
  my $len="%r15";
  my @nhi=("%ebx","%ecx");
  my @rem=("%r12","%r13");
  my $Hshr4="%rbp";

	&sub	($Htbl,-128);		# size optimization
	&lea	($Hshr4,"16+128(%rsp)");
	{ my @lo =($nlo,$nhi);
	  my @hi =($Zlo,$Zhi);

	  &xor	($dat,$dat);
	  for ($i=0,$j=-2;$i<18;$i++,$j++) {
	    &mov	("$j(%rsp)",&LB($dat))		if ($i>1);
	    &or		($lo[0],$tmp)			if ($i>1);
	    &mov	(&LB($dat),&LB($lo[1]))		if ($i>0 && $i<17);
	    &shr	($lo[1],4)			if ($i>0 && $i<17);
	    &mov	($tmp,$hi[1])			if ($i>0 && $i<17);
	    &shr	($hi[1],4)			if ($i>0 && $i<17);
	    &mov	("8*$j($Hshr4)",$hi[0])		if ($i>1);
	    &mov	($hi[0],"16*$i+0-128($Htbl)")	if ($i<16);
	    &shl	(&LB($dat),4)			if ($i>0 && $i<17);
	    &mov	("8*$j-128($Hshr4)",$lo[0])	if ($i>1);
	    &mov	($lo[0],"16*$i+8-128($Htbl)")	if ($i<16);
	    &shl	($tmp,60)			if ($i>0 && $i<17);

	    push	(@lo,shift(@lo));
	    push	(@hi,shift(@hi));
	  }
	}
	&add	($Htbl,-128);
	&mov	($Zlo,"8($Xi)");
	&mov	($Zhi,"0($Xi)");
	&add	($len,$inp);		# pointer to the end of data
	&lea	($rem_8bit,".Lrem_8bit(%rip)");
	&jmp	(".Louter_loop");

$code.=".align	16\n.Louter_loop:\n";
	&xor	($Zhi,"($inp)");
	&mov	("%rdx","8($inp)");
	&lea	($inp,"16($inp)");
	&xor	("%rdx",$Zlo);
	&mov	("($Xi)",$Zhi);
	&mov	("8($Xi)","%rdx");
	&shr	("%rdx",32);

	&xor	($nlo,$nlo);
	&rol	($dat,8);
	&mov	(&LB($nlo),&LB($dat));
	&movz	($nhi[0],&LB($dat));
	&shl	(&LB($nlo),4);
	&shr	($nhi[0],4);

	for ($j=11,$i=0;$i<15;$i++) {
	    &rol	($dat,8);
	    &xor	($Zlo,"8($Htbl,$nlo)")		if ($i>0);
	    &xor	($Zhi,"($Htbl,$nlo)")		if ($i>0);
	    &mov	($Zlo,"8($Htbl,$nlo)")		if ($i==0);
	    &mov	($Zhi,"($Htbl,$nlo)")		if ($i==0);

	    &mov	(&LB($nlo),&LB($dat));
	    &xor	($Zlo,$tmp)			if ($i>0);
	    &movzw	($rem[1],"($rem_8bit,$rem[1],2)")	if ($i>0);

	    &movz	($nhi[1],&LB($dat));
	    &shl	(&LB($nlo),4);
	    &movzb	($rem[0],"(%rsp,$nhi[0])");

	    &shr	($nhi[1],4)			if ($i<14);
	    &and	($nhi[1],0xf0)			if ($i==14);
	    &shl	($rem[1],48)			if ($i>0);
	    &xor	($rem[0],$Zlo);

	    &mov	($tmp,$Zhi);
	    &xor	($Zhi,$rem[1])			if ($i>0);
	    &shr	($Zlo,8);

	    &movz	($rem[0],&LB($rem[0]));
	    &mov	($dat,"$j($Xi)")		if (--$j%4==0);
	    &shr	($Zhi,8);

	    &xor	($Zlo,"-128($Hshr4,$nhi[0],8)");
	    &shl	($tmp,56);
	    &xor	($Zhi,"($Hshr4,$nhi[0],8)");

	    unshift	(@nhi,pop(@nhi));		# "rotate" registers
	    unshift	(@rem,pop(@rem));
	}
	&movzw	($rem[1],"($rem_8bit,$rem[1],2)");
	&xor	($Zlo,"8($Htbl,$nlo)");
	&xor	($Zhi,"($Htbl,$nlo)");

	&shl	($rem[1],48);
	&xor	($Zlo,$tmp);

	&xor	($Zhi,$rem[1]);
	&movz	($rem[0],&LB($Zlo));
	&shr	($Zlo,4);

	&mov	($tmp,$Zhi);
	&shl	(&LB($rem[0]),4);
	&shr	($Zhi,4);

	&xor	($Zlo,"8($Htbl,$nhi[0])");
	&movzw	($rem[0],"($rem_8bit,$rem[0],2)");
	&shl	($tmp,60);

	&xor	($Zhi,"($Htbl,$nhi[0])");
	&xor	($Zlo,$tmp);
	&shl	($rem[0],48);

	&bswap	($Zlo);
	&xor	($Zhi,$rem[0]);

	&bswap	($Zhi);
	&cmp	($inp,$len);
	&jb	(".Louter_loop");
}
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	lea	280(%rsp),%rsi
	mov	0(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lghash_epilogue:
	ret
.size	gcm_ghash_4bit,.-gcm_ghash_4bit
___
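
######################################################################
# The "528B" figure quoted at the top breaks down as follows: the
# 256-byte per-key $Htbl, plus 256 bytes of H entries pre-shifted
# right by 4 bits (built at 16+128(%rsp) above and addressed through
# $Hshr4), plus a 16-byte stack table holding the nibbles shifted out
# of those entries, with the 512-byte .Lrem_8bit table shared among
# keys.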
######################################################################
# PCLMULQDQ version.

@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
		("%rdi","%rsi","%rdx","%rcx");	# Unix order

($Xi,$Xhi)=("%xmm0","%xmm1");	$Hkey="%xmm2";
($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");

sub clmul64x64_T2 {	# minimal register pressure
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
$code.=<<___;
	movdqa	$Xi,$Xhi		#
	pshufd	\$0b01001110,$Xi,$T1
	pshufd	\$0b01001110,$Hkey,$T2
	pxor	$Xi,$T1			#
	pxor	$Hkey,$T2
___
} else {
$code.=<<___;
	movdqa	$Xi,$Xhi		#
	pshufd	\$0b01001110,$Xi,$T1
	pxor	$Xi,$T1			#
___
}
$code.=<<___;
	pclmulqdq	\$0x00,$Hkey,$Xi	#######
	pclmulqdq	\$0x11,$Hkey,$Xhi	#######
	pclmulqdq	\$0x00,$HK,$T1		#######
	pxor	$Xi,$T1			#
	pxor	$Xhi,$T1		#

	movdqa	$T1,$T2			#
	psrldq	\$8,$T1
	pslldq	\$8,$T2			#
	pxor	$T1,$Xhi
	pxor	$T2,$Xi			#
___
}

sub reduction_alg9 {	# 17/11 times faster than Intel version
my ($Xhi,$Xi) = @_;

$code.=<<___;
	# 1st phase
	movdqa	$Xi,$T2			#
	movdqa	$Xi,$T1
	psllq	\$5,$Xi
	pxor	$Xi,$T1			#
	psllq	\$1,$Xi
	pxor	$T1,$Xi			#
	psllq	\$57,$Xi		#
	movdqa	$Xi,$T1			#
	pslldq	\$8,$Xi
	psrldq	\$8,$T1			#
	pxor	$T2,$Xi
	pxor	$T1,$Xhi		#

	# 2nd phase
	movdqa	$Xi,$T2
	psrlq	\$1,$Xi
	pxor	$T2,$Xhi		#
	pxor	$Xi,$T2
	psrlq	\$5,$Xi
	pxor	$T2,$Xi			#
	psrlq	\$1,$Xi			#
	pxor	$Xhi,$Xi		#
___
}
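
# clmul64x64_T2 above computes a 128x128->256-bit carry-less product
# with three pclmulqdq's instead of four via Karatsuba:
#	X*H = Xh*Hh<<128 ^ ((Xh^Xl)*(Hh^Hl) ^ Xh*Hh ^ Xl*Hl)<<64 ^ Xl*Hl
# A hedged Math::BigInt model of that identity (unused):

sub clmul_ref {				# bit-serial carry-less multiply
my ($a,$b)=@_;				# Math::BigInt operands
my $r = Math::BigInt->bzero();
my ($t,$i) = ($a->copy(),0);
	while (!$t->is_zero()) {
	    $r->bxor($b->copy()->blsft($i)) if $t->is_odd();
	    $t->brsft(1); $i++;
	}
return $r;
}

sub clmul128_karatsuba_ref {
my ($X,$H)=@_;				# Math::BigInt, both < 2^128
my $M = Math::BigInt->bone()->blsft(64)->bsub(1);
my ($Xl,$Xh) = ($X->copy()->band($M),$X->copy()->brsft(64));
my ($Hl,$Hh) = ($H->copy()->band($M),$H->copy()->brsft(64));
my $lo  = clmul_ref($Xl,$Hl);
my $hi  = clmul_ref($Xh,$Hh);
my $mid = clmul_ref($Xl->copy()->bxor($Xh),$Hl->copy()->bxor($Hh));
	$mid->bxor($lo)->bxor($hi);	# Karatsuba post-processing
my $r = $hi->copy()->blsft(128);
	$r->bxor($mid->copy()->blsft(64));
return $r->bxor($lo);
}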
{ my ($Htbl,$Xip)=@_4args;
  my $HK="%xmm6";

$code.=<<___;
.globl	gcm_init_clmul
.type	gcm_init_clmul,\@abi-omnipotent
.align	16
gcm_init_clmul:
.L_init_clmul:
___
$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
___
$code.=<<___;
	movdqu	($Xip),$Hkey
	pshufd	\$0b01001110,$Hkey,$Hkey	# dword swap

	# <<1 twist
	pshufd	\$0b11111111,$Hkey,$T2	# broadcast uppermost dword
	movdqa	$Hkey,$T1
	psllq	\$1,$Hkey
	pxor	$T3,$T3			#
	psrlq	\$63,$T1
	pcmpgtd	$T2,$T3			# broadcast carry bit
	pslldq	\$8,$T1
	por	$T1,$Hkey		# H<<=1

	# magic reduction
	pand	.L0x1c2_polynomial(%rip),$T3
	pxor	$T3,$Hkey		# if(carry) H^=0x1c2_polynomial

	# calculate H^2
	pshufd	\$0b01001110,$Hkey,$HK
	movdqa	$Hkey,$Xi
	pxor	$Hkey,$HK
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd	\$0b01001110,$Hkey,$T1
	pshufd	\$0b01001110,$Xi,$T2
	pxor	$Hkey,$T1		# Karatsuba pre-processing
	movdqu	$Hkey,0x00($Htbl)	# save H
	pxor	$Xi,$T2			# Karatsuba pre-processing
	movdqu	$Xi,0x10($Htbl)		# save H^2
	palignr	\$8,$T1,$T2		# low part is H.lo^H.hi...
	movdqu	$T2,0x20($Htbl)		# save Karatsuba "salt"
___
if ($do4xaggr) {
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^3
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	movdqa	$Xi,$T3
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^4
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd	\$0b01001110,$T3,$T1
	pshufd	\$0b01001110,$Xi,$T2
	pxor	$T3,$T1			# Karatsuba pre-processing
	movdqu	$T3,0x30($Htbl)		# save H^3
	pxor	$Xi,$T2			# Karatsuba pre-processing
	movdqu	$Xi,0x40($Htbl)		# save H^4
	palignr	\$8,$T1,$T2		# low part is H^3.lo^H^3.hi...
	movdqu	$T2,0x50($Htbl)		# save Karatsuba "salt"
___
}
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	lea	0x18(%rsp),%rsp
.LSEH_end_gcm_init_clmul:
___
$code.=<<___;
	ret
.size	gcm_init_clmul,.-gcm_init_clmul
___
}

{ my ($Xip,$Htbl)=@_4args;

$code.=<<___;
.globl	gcm_gmult_clmul
.type	gcm_gmult_clmul,\@abi-omnipotent
.align	16
gcm_gmult_clmul:
.L_gmult_clmul:
	movdqu	($Xip),$Xi
	movdqa	.Lbswap_mask(%rip),$T3
	movdqu	($Htbl),$Hkey
	movdqu	0x20($Htbl),$T2
	pshufb	$T3,$Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$T2);
$code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
	# experimental alternative. the special thing about it is that
	# there is no dependency between the two multiplications...
	mov	\$`0xE1<<1`,%eax
	mov	\$0xA040608020C0E000,%r10	# ((7..0)·0xE0)&0xff
	mov	\$0x07,%r11d
	movq	%rax,$T1
	movq	%r10,$T2
	movq	%r11,$T3		# borrow $T3
	pand	$Xi,$T3
	pshufb	$T3,$T2			# ($Xi&7)·0xE0
	movq	%rax,$T3
	pclmulqdq	\$0x00,$Xi,$T1	# ·(0xE1<<1)
	pxor	$Xi,$T2
	pslldq	\$15,$T2
	paddd	$T2,$T2			# <<(64+56+1)
	pxor	$T2,$Xi
	pclmulqdq	\$0x01,$T3,$Xi
	movdqa	.Lbswap_mask(%rip),$T3	# reload $T3
	psrldq	\$1,$T1
	pxor	$T1,$Xhi
	pslldq	\$7,$Xi
	pxor	$Xhi,$Xi
___
$code.=<<___;
	pshufb	$T3,$Xi
	movdqu	$Xi,($Xip)
	ret
.size	gcm_gmult_clmul,.-gcm_gmult_clmul
___
}
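
######################################################################
# Per-key table layout written by gcm_init_clmul, as consumed by the
# routines below (offsets in bytes; "salt" = the precomputed hi^lo
# halves for Karatsuba):
#	0x00 H    0x10 H^2  0x20 salts for H,H^2
#	0x30 H^3  0x40 H^4  0x50 salts for H^3,H^4
# gcm_init_avx extends the same pattern up to H^8.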
{ my ($Xip,$Htbl,$inp,$len)=@_4args;
  my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
  my ($T1,$T2,$T3)=map("%xmm$_",(8..10));

$code.=<<___;
.globl	gcm_ghash_clmul
.type	gcm_ghash_clmul,\@abi-omnipotent
.align	32
gcm_ghash_clmul:
.L_ghash_clmul:
___
$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
___
$code.=<<___;
	movdqa	.Lbswap_mask(%rip),$T3

	movdqu	($Xip),$Xi
	movdqu	($Htbl),$Hkey
	movdqu	0x20($Htbl),$HK
	pshufb	$T3,$Xi

	sub	\$0x10,$len
	jz	.Lodd_tail

	movdqu	0x10($Htbl),$Hkey2
___
if ($do4xaggr) {
my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));

$code.=<<___;
	mov	OPENSSL_ia32cap_P+4(%rip),%eax
	cmp	\$0x30,$len
	jb	.Lskip4x

	and	\$`1<<26|1<<22`,%eax	# isolate MOVBE+XSAVE
	cmp	\$`1<<22`,%eax		# check for MOVBE without XSAVE
	je	.Lskip4x

	sub	\$0x30,$len
	mov	\$0xA040608020C0E000,%rax	# ((7..0)·0xE0)&0xff
	movdqu	0x30($Htbl),$Hkey3
	movdqu	0x40($Htbl),$Hkey4

	#######
	# Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
	#
	movdqu	0x30($inp),$Xln
	movdqu	0x20($inp),$Xl
	pshufb	$T3,$Xln
	pshufb	$T3,$Xl
	movdqa	$Xln,$Xhn
	pshufd	\$0b01001110,$Xln,$Xmn
	pxor	$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	movdqa	$Xl,$Xh
	pshufd	\$0b01001110,$Xl,$Xm
	pxor	$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey2,$Xl
	pclmulqdq	\$0x11,$Hkey2,$Xh
	pclmulqdq	\$0x10,$HK,$Xm
	xorps	$Xl,$Xln
	xorps	$Xh,$Xhn
	movups	0x50($Htbl),$HK
	xorps	$Xm,$Xmn

	movdqu	0x10($inp),$Xl
	movdqu	0($inp),$T1
	pshufb	$T3,$Xl
	pshufb	$T3,$T1
	movdqa	$Xl,$Xh
	pshufd	\$0b01001110,$Xl,$Xm
	pxor	$T1,$Xi
	pxor	$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	movdqa	$Xi,$Xhi
	pshufd	\$0b01001110,$Xi,$T1
	pxor	$Xi,$T1
	pclmulqdq	\$0x11,$Hkey3,$Xh
	pclmulqdq	\$0x00,$HK,$Xm
	xorps	$Xl,$Xln
	xorps	$Xh,$Xhn

	lea	0x40($inp),$inp
	sub	\$0x40,$len
	jc	.Ltail4x

	jmp	.Lmod4_loop
.align	32
.Lmod4_loop:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	xorps	$Xm,$Xmn
	movdqu	0x30($inp),$Xl
	pshufb	$T3,$Xl
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	xorps	$Xln,$Xi
	movdqu	0x20($inp),$Xln
	movdqa	$Xl,$Xh
	pclmulqdq	\$0x10,$HK,$T1
	pshufd	\$0b01001110,$Xl,$Xm
	xorps	$Xhn,$Xhi
	pxor	$Xl,$Xm
	pshufb	$T3,$Xln
	movups	0x20($Htbl),$HK
	xorps	$Xmn,$T1
	pclmulqdq	\$0x00,$Hkey,$Xl
	pshufd	\$0b01001110,$Xln,$Xmn

	pxor	$Xi,$T1			# aggregated Karatsuba post-processing
	movdqa	$Xln,$Xhn
	pxor	$Xhi,$T1		#
	pxor	$Xln,$Xmn
	movdqa	$T1,$T2			#
	pclmulqdq	\$0x11,$Hkey,$Xh
	pslldq	\$8,$T1
	psrldq	\$8,$T2			#
	pxor	$T1,$Xi
	movdqa	.L7_mask(%rip),$T1
	pxor	$T2,$Xhi		#
	movq	%rax,$T2

	pand	$Xi,$T1			# 1st phase
	pshufb	$T1,$T2			#
	pxor	$Xi,$T2			#
	pclmulqdq	\$0x00,$HK,$Xm
	psllq	\$57,$T2		#
	movdqa	$T2,$T1			#
	pslldq	\$8,$T2
	pclmulqdq	\$0x00,$Hkey2,$Xln
	psrldq	\$8,$T1			#
	pxor	$T2,$Xi
	pxor	$T1,$Xhi		#
	movdqu	0($inp),$T1

	movdqa	$Xi,$T2			# 2nd phase
	psrlq	\$1,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhn
	xorps	$Xl,$Xln
	movdqu	0x10($inp),$Xl
	pshufb	$T3,$Xl
	pclmulqdq	\$0x10,$HK,$Xmn
	xorps	$Xh,$Xhn
	movups	0x50($Htbl),$HK
	pshufb	$T3,$T1
	pxor	$T2,$Xhi		#
	pxor	$Xi,$T2
	psrlq	\$5,$Xi

	movdqa	$Xl,$Xh
	pxor	$Xm,$Xmn
	pshufd	\$0b01001110,$Xl,$Xm
	pxor	$T2,$Xi			#
	pxor	$T1,$Xhi
	pxor	$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	psrlq	\$1,$Xi			#
	pxor	$Xhi,$Xi		#
	movdqa	$Xi,$Xhi
	pclmulqdq	\$0x11,$Hkey3,$Xh
	xorps	$Xl,$Xln
	pshufd	\$0b01001110,$Xi,$T1
	pxor	$Xi,$T1

	pclmulqdq	\$0x00,$HK,$Xm
	xorps	$Xh,$Xhn

	lea	0x40($inp),$inp
	sub	\$0x40,$len
	jnc	.Lmod4_loop

.Ltail4x:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	pclmulqdq	\$0x10,$HK,$T1
	xorps	$Xm,$Xmn
	xorps	$Xln,$Xi
	xorps	$Xhn,$Xhi
	pxor	$Xi,$Xhi		# aggregated Karatsuba post-processing
	pxor	$Xmn,$T1

	pxor	$Xhi,$T1		#
	pxor	$Xi,$Xhi

	movdqa	$T1,$T2			#
	psrldq	\$8,$T1
	pslldq	\$8,$T2			#
	pxor	$T1,$Xhi
	pxor	$T2,$Xi			#
___
	&reduction_alg9($Xhi,$Xi);
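
######################################################################
# The 4x code path above relies on distributivity in GF(2^128) to
# aggregate four blocks per reduction,
#	Xi+4 = [(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P,
# and interleaves the reduction of one quartet with the carry-less
# multiplications of the next to hide pclmulqdq latency.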
$code.=<<___;
	add	\$0x40,$len
	jz	.Ldone
	movdqu	0x20($Htbl),$HK
	sub	\$0x10,$len
	jz	.Lodd_tail
.Lskip4x:
___
}
$code.=<<___;
	#######
	# Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
	#	[(H*Ii+1) + (H*Xi+1)] mod P =
	#	[(H*Ii+1) + H^2*(Ii+Xi)] mod P
	#
	movdqu	($inp),$T1		# Ii
	movdqu	16($inp),$Xln		# Ii+1
	pshufb	$T3,$T1
	pshufb	$T3,$Xln
	pxor	$T1,$Xi			# Ii+Xi

	movdqa	$Xln,$Xhn
	pshufd	\$0b01001110,$Xln,$Xmn
	pxor	$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	lea	32($inp),$inp		# i+=2
	nop
	sub	\$0x20,$len
	jbe	.Leven_tail
	nop
	jmp	.Lmod_loop

.align	32
.Lmod_loop:
	movdqa	$Xi,$Xhi
	movdqa	$Xmn,$T1
	pshufd	\$0b01001110,$Xi,$Xmn	#
	pxor	$Xi,$Xmn		#

	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn

	pxor	$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	pxor	$Xhn,$Xhi
	movdqu	($inp),$T2		# Ii
	pxor	$Xi,$T1			# aggregated Karatsuba post-processing
	pshufb	$T3,$T2
	movdqu	16($inp),$Xln		# Ii+1

	pxor	$Xhi,$T1
	pxor	$T2,$Xhi		# "Ii+Xi", consume early
	pxor	$T1,$Xmn
	pshufb	$T3,$Xln
	movdqa	$Xmn,$T1		#
	psrldq	\$8,$T1
	pslldq	\$8,$Xmn		#
	pxor	$T1,$Xhi
	pxor	$Xmn,$Xi		#

	movdqa	$Xln,$Xhn		#

	movdqa	$Xi,$T2			# 1st phase
	movdqa	$Xi,$T1
	psllq	\$5,$Xi
	pxor	$Xi,$T1			#
	pclmulqdq	\$0x00,$Hkey,$Xln	#######
	psllq	\$1,$Xi
	pxor	$T1,$Xi			#
	psllq	\$57,$Xi		#
	movdqa	$Xi,$T1			#
	pslldq	\$8,$Xi
	psrldq	\$8,$T1			#
	pxor	$T2,$Xi
	pshufd	\$0b01001110,$Xhn,$Xmn
	pxor	$T1,$Xhi		#
	pxor	$Xhn,$Xmn		#

	movdqa	$Xi,$T2			# 2nd phase
	psrlq	\$1,$Xi
	pclmulqdq	\$0x11,$Hkey,$Xhn	#######
	pxor	$T2,$Xhi		#
	pxor	$Xi,$T2
	psrlq	\$5,$Xi
	pxor	$T2,$Xi			#
	lea	32($inp),$inp
	psrlq	\$1,$Xi			#
	pclmulqdq	\$0x00,$HK,$Xmn		#######
	pxor	$Xhi,$Xi		#

	sub	\$0x20,$len
	ja	.Lmod_loop

.Leven_tail:
	movdqa	$Xi,$Xhi
	movdqa	$Xmn,$T1
	pshufd	\$0b01001110,$Xi,$Xmn	#
	pxor	$Xi,$Xmn		#

	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn

	pxor	$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	pxor	$Xhn,$Xhi
	pxor	$Xi,$T1
	pxor	$Xhi,$T1
	pxor	$T1,$Xmn
	movdqa	$Xmn,$T1		#
	psrldq	\$8,$T1
	pslldq	\$8,$Xmn		#
	pxor	$T1,$Xhi
	pxor	$Xmn,$Xi		#
___
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	test	$len,$len
	jnz	.Ldone

.Lodd_tail:
	movdqu	($inp),$T1		# Ii
	pshufb	$T3,$T1
	pxor	$T1,$Xi			# Ii+Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H*(Ii+Xi)
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
.Ldone:
	pshufb	$T3,$Xi
	movdqu	$Xi,($Xip)
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_clmul:
___
$code.=<<___;
	ret
.size	gcm_ghash_clmul,.-gcm_ghash_clmul
___
}
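
# reduction_alg9, used throughout, is algebraically the remainder of
# the 256-bit product modulo P(x)=x^128+x^7+x^2+x+1, computed on the
# bit-reflected encoding (hence the 0x1c2/0xE1 constants rather than
# 0x87). For intuition, a hedged plain-polynomial remainder model
# (unused):

sub polymod_ref {
my ($T)=@_;				# Math::BigInt product, < 2^256
my $P = Math::BigInt->bone()->blsft(128)->bxor(Math::BigInt->new(0x87));
	for (my $i=255; $i>=128; $i--) {
	    $T->bxor($P->copy()->blsft($i-128))
		if $T->copy()->brsft($i)->is_odd();
	}
return $T;				# degree < 128
}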
$code.=<<___;
.globl	gcm_init_avx
.type	gcm_init_avx,\@abi-omnipotent
.align	32
gcm_init_avx:
___
if ($avx) {
my ($Htbl,$Xip)=@_4args;
my $HK="%xmm6";

$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
___
$code.=<<___;
	vzeroupper

	vmovdqu	($Xip),$Hkey
	vpshufd	\$0b01001110,$Hkey,$Hkey	# dword swap

	# <<1 twist
	vpshufd	\$0b11111111,$Hkey,$T2	# broadcast uppermost dword
	vpsrlq	\$63,$Hkey,$T1
	vpsllq	\$1,$Hkey,$Hkey
	vpxor	$T3,$T3,$T3		#
	vpcmpgtd	$T2,$T3,$T3	# broadcast carry bit
	vpslldq	\$8,$T1,$T1
	vpor	$T1,$Hkey,$Hkey		# H<<=1

	# magic reduction
	vpand	.L0x1c2_polynomial(%rip),$T3,$T3
	vpxor	$T3,$Hkey,$Hkey		# if(carry) H^=0x1c2_polynomial

	vpunpckhqdq	$Hkey,$Hkey,$HK
	vmovdqa	$Hkey,$Xi
	vpxor	$Hkey,$HK,$HK
	mov	\$4,%r10		# up to H^8
	jmp	.Linit_start_avx
___

sub clmul64x64_avx {
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
$code.=<<___;
	vpunpckhqdq	$Xi,$Xi,$T1
	vpunpckhqdq	$Hkey,$Hkey,$T2
	vpxor	$Xi,$T1,$T1		#
	vpxor	$Hkey,$T2,$T2
___
} else {
$code.=<<___;
	vpunpckhqdq	$Xi,$Xi,$T1
	vpxor	$Xi,$T1,$T1		#
___
}
$code.=<<___;
	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xhi	#######
	vpclmulqdq	\$0x00,$Hkey,$Xi,$Xi	#######
	vpclmulqdq	\$0x00,$HK,$T1,$T1	#######
	vpxor	$Xi,$Xhi,$T2		#
	vpxor	$T2,$T1,$T1		#

	vpslldq	\$8,$T1,$T2		#
	vpsrldq	\$8,$T1,$T1
	vpxor	$T2,$Xi,$Xi		#
	vpxor	$T1,$Xhi,$Xhi
___
}

sub reduction_avx {
my ($Xhi,$Xi) = @_;

$code.=<<___;
	vpsllq	\$57,$Xi,$T1		# 1st phase
	vpsllq	\$62,$Xi,$T2
	vpxor	$T1,$T2,$T2		#
	vpsllq	\$63,$Xi,$T1
	vpxor	$T1,$T2,$T2		#
	vpslldq	\$8,$T2,$T1		#
	vpsrldq	\$8,$T2,$T2
	vpxor	$T1,$Xi,$Xi		#
	vpxor	$T2,$Xhi,$Xhi

	vpsrlq	\$1,$Xi,$T2		# 2nd phase
	vpxor	$Xi,$Xhi,$Xhi
	vpxor	$T2,$Xi,$Xi		#
	vpsrlq	\$5,$T2,$T2
	vpxor	$T2,$Xi,$Xi		#
	vpsrlq	\$1,$Xi,$Xi		#
	vpxor	$Xhi,$Xi,$Xi		#
___
}
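
######################################################################
# reduction_avx above is the same two-phase folding as reduction_alg9;
# the left-shift counts 57, 62 and 63 are 64 minus the exponents 7, 2
# and 1 of the non-unit terms of P(x)=x^128+x^7+x^2+x+1, applied to
# the bit-reflected representation.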
$code.=<<___;
.align	32
.Linit_loop_avx:
	vpalignr	\$8,$T1,$T2,$T3		# low part is H.lo^H.hi...
	vmovdqu	$T3,-0x10($Htbl)		# save Karatsuba "salt"
___
	&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^3,5,7
	&reduction_avx	($Xhi,$Xi);
$code.=<<___;
.Linit_start_avx:
	vmovdqa	$Xi,$T3
___
	&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^2,4,6,8
	&reduction_avx	($Xhi,$Xi);
$code.=<<___;
	vpshufd	\$0b01001110,$T3,$T1
	vpshufd	\$0b01001110,$Xi,$T2
	vpxor	$T3,$T1,$T1		# Karatsuba pre-processing
	vmovdqu	$T3,0x00($Htbl)		# save H^1,3,5,7
	vpxor	$Xi,$T2,$T2		# Karatsuba pre-processing
	vmovdqu	$Xi,0x10($Htbl)		# save H^2,4,6,8
	lea	0x30($Htbl),$Htbl
	sub	\$1,%r10
	jnz	.Linit_loop_avx

	vpalignr	\$8,$T2,$T1,$T3	# last "salt" is flipped
	vmovdqu	$T3,-0x10($Htbl)

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	lea	0x18(%rsp),%rsp
.LSEH_end_gcm_init_avx:
___
$code.=<<___;
	ret
.size	gcm_init_avx,.-gcm_init_avx
___
} else {
$code.=<<___;
	jmp	.L_init_clmul
.size	gcm_init_avx,.-gcm_init_avx
___
}

$code.=<<___;
.globl	gcm_gmult_avx
.type	gcm_gmult_avx,\@abi-omnipotent
.align	32
gcm_gmult_avx:
	jmp	.L_gmult_clmul
.size	gcm_gmult_avx,.-gcm_gmult_avx
___
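
######################################################################
# gcm_ghash_avx below is the 8x aggregate factor code path mentioned
# in the March 2013 note: three running 128-bit sums (lo, hi and the
# Karatsuba middle term) are accumulated over eight H^8..H^1
# multiplications and folded with a single two-phase reduction.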
$code.=<<___;
.globl	gcm_ghash_avx
.type	gcm_ghash_avx,\@abi-omnipotent
.align	32
gcm_ghash_avx:
___
if ($avx) {
my ($Xip,$Htbl,$inp,$len)=@_4args;
my ($Xlo,$Xhi,$Xmi,
    $Zlo,$Zhi,$Zmi,
    $Hkey,$HK,$T1,$T2,
    $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));

$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
___
$code.=<<___;
	vzeroupper

	vmovdqu	($Xip),$Xi		# load $Xi
	lea	.L0x1c2_polynomial(%rip),%r10
	lea	0x40($Htbl),$Htbl	# size optimization
	vmovdqu	.Lbswap_mask(%rip),$bswap
	vpshufb	$bswap,$Xi,$Xi
	cmp	\$0x80,$len
	jb	.Lshort_avx
	sub	\$0x80,$len

	vmovdqu	0x70($inp),$Ii		# I[7]
	vmovdqu	0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpshufb	$bswap,$Ii,$Ii
	vmovdqu	0x20-0x40($Htbl),$HK

	vpunpckhqdq	$Ii,$Ii,$T2
	vmovdqu	0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Ii,$T2,$T2
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu	0x50($inp),$Ii		# I[5]
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor	$Ij,$T1,$T1

	vpshufb	$bswap,$Ii,$Ii
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpxor	$Ii,$T2,$T2
	vmovdqu	0x40($inp),$Ij		# I[4]
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0x50-0x40($Htbl),$HK

	vpshufb	$bswap,$Ij,$Ij
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor	$Ij,$T1,$T1

	vmovdqu	0x30($inp),$Ii		# I[3]
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor	$Zhi,$Xhi,$Xhi
	vpshufb	$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpxor	$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0x80-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2

	vmovdqu	0x20($inp),$Ij		# I[2]
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpxor	$Xmi,$Zmi,$Zmi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor	$Ij,$T1,$T1

	vmovdqu	0x10($inp),$Ii		# I[1]
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor	$Zhi,$Xhi,$Xhi
	vpshufb	$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpxor	$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0xb0-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2

	vmovdqu	($inp),$Ij		# I[0]
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x10,$HK,$T2,$Xmi

	lea	0x80($inp),$inp
	cmp	\$0x80,$len
	jb	.Ltail_avx

	vpxor	$Xi,$Ij,$Ij		# accumulate $Xi
	sub	\$0x80,$len
	jmp	.Loop8x_avx

.align	32
.Loop8x_avx:
	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu	0x70($inp),$Ii		# I[7]
	vpxor	$Xlo,$Zlo,$Zlo
	vpxor	$Ij,$T1,$T1
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xi
	vpshufb	$bswap,$Ii,$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xo
	vmovdqu	0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Tred
	vmovdqu	0x20-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2

	vmovdqu	0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Zlo,$Xi,$Xi		# collect result
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vxorps	$Zhi,$Xo,$Xo
	vmovdqu	0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vpxor	$Zmi,$Tred,$Tred
	vxorps	$Ij,$T1,$T1

	vmovdqu	0x50($inp),$Ii		# I[5]
	vpxor	$Xi,$Tred,$Tred		# aggregated Karatsuba post-processing
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor	$Xo,$Tred,$Tred
	vpslldq	\$8,$Tred,$T2
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vpsrldq	\$8,$Tred,$Tred
	vpxor	$T2, $Xi, $Xi
	vmovdqu	0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb	$bswap,$Ii,$Ii
	vxorps	$Tred,$Xo, $Xo
	vpxor	$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu	0x50-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2
	vpxor	$Xmi,$Zmi,$Zmi

	vmovdqu	0x40($inp),$Ij		# I[4]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 1st phase
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vxorps	$Ij,$T1,$T1
	vpxor	$Zmi,$Xmi,$Xmi

	vmovdqu	0x30($inp),$Ii		# I[3]
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb	$bswap,$Ii,$Ii
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu	0x80-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2
	vpxor	$Xmi,$Zmi,$Zmi

	vmovdqu	0x20($inp),$Ij		# I[2]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vpxor	$Ij,$T1,$T1
	vpxor	$Zmi,$Xmi,$Xmi
	vxorps	$Tred,$Xi,$Xi

	vmovdqu	0x10($inp),$Ii		# I[1]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 2nd phase
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb	$bswap,$Ii,$Ii
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vxorps	$Xo,$Tred,$Tred
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu	0xb0-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2
	vpxor	$Xmi,$Zmi,$Zmi

	vmovdqu	($inp),$Ij		# I[0]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpxor	$Tred,$Ij,$Ij
	vpclmulqdq	\$0x10,$HK, $T2,$Xmi
	vpxor	$Xi,$Ij,$Ij		# accumulate $Xi

	lea	0x80($inp),$inp
	sub	\$0x80,$len
	jnc	.Loop8x_avx

	add	\$0x80,$len
	jmp	.Ltail_no_xor_avx
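
######################################################################
# .Lshort_avx below mops up the remaining 1..7 blocks. Note it walks
# the input back to front, starting with the very last block, so that
# the power of H each block meets matches its distance from the end.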
.align	32
.Lshort_avx:
	vmovdqu	-0x10($inp,$len),$Ii	# very last word
	lea	($inp,$len),$inp
	vmovdqu	0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vmovdqu	0x20-0x40($Htbl),$HK
	vpshufb	$bswap,$Ii,$Ij

	vmovdqa	$Xlo,$Zlo		# subtle way to zero $Zlo,
	vmovdqa	$Xhi,$Zhi		# $Zhi and
	vmovdqa	$Xmi,$Zmi		# $Zmi
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x20($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq	\$8,$HK,$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x30($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu	0x50-0x40($Htbl),$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x40($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq	\$8,$HK,$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x50($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu	0x80-0x40($Htbl),$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x60($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq	\$8,$HK,$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x70($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovq	0xb8-0x40($Htbl),$HK
	sub	\$0x10,$len
	jmp	.Ltail_avx

.align	32
.Ltail_avx:
	vpxor	$Xi,$Ij,$Ij		# accumulate $Xi
.Ltail_no_xor_avx:
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi

	vmovdqu	(%r10),$Tred

	vpxor	$Xlo,$Zlo,$Xi
	vpxor	$Xhi,$Zhi,$Xo
	vpxor	$Xmi,$Zmi,$Zmi

	vpxor	$Xi, $Zmi,$Zmi		# aggregated Karatsuba post-processing
	vpxor	$Xo, $Zmi,$Zmi
	vpslldq	\$8, $Zmi,$T2
	vpsrldq	\$8, $Zmi,$Zmi
	vpxor	$T2, $Xi, $Xi
	vpxor	$Zmi,$Xo, $Xo

	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 1st phase
	vpalignr	\$8,$Xi,$Xi,$Xi
	vpxor	$T2,$Xi,$Xi

	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 2nd phase
	vpalignr	\$8,$Xi,$Xi,$Xi
	vpxor	$Xo,$Xi,$Xi
	vpxor	$T2,$Xi,$Xi

	cmp	\$0,$len
	jne	.Lshort_avx

	vpshufb	$bswap,$Xi,$Xi
	vmovdqu	$Xi,($Xip)
	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_avx:
___
$code.=<<___;
	ret
.size	gcm_ghash_avx,.-gcm_ghash_avx
___
} else {
$code.=<<___;
	jmp	.L_ghash_clmul
.size	gcm_ghash_avx,.-gcm_ghash_avx
___
}

$code.=<<___;
.align	64
.Lbswap_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.L7_mask:
	.long	7,0,7,0
.L7_mask_poly:
	.long	7,0,`0xE1<<1`,0
.align	64
.type	.Lrem_4bit,\@object
.Lrem_4bit:
	.long	0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
	.long	0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
	.long	0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
	.long	0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
.type	.Lrem_8bit,\@object
.Lrem_8bit:
	.value	0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
	.value	0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
	.value	0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
	.value	0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
	.value	0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
	.value	0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
	.value	0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
	.value	0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
	.value	0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
	.value	0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
	.value	0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
	.value	0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
	.value	0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
	.value	0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
	.value	0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
	.value	0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
	.value	0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
	.value	0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
	.value	0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
	.value	0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
	.value	0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
	.value	0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
	.value	0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
	.value	0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
	.value	0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
	.value	0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
	.value	0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
	.value	0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
	.value	0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
	.value	0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
	.value	0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
	.value	0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE

.asciz	"GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___
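
# A note on .L7_mask above and the 0xA040608020C0E000 immediate used
# by .Lmod4_loop: the low three bits of Xi select a byte of that
# constant, a pshufb-addressable table of the carry-less products
# (n·0xE0)&0xff for n=7..0, standing in for part of the shift
# sequence of reduction_alg9's first phase.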
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_prologue

	lea	24(%rax),%rax		# adjust "rsp"

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_gcm_gmult_4bit
	.rva	.LSEH_end_gcm_gmult_4bit
	.rva	.LSEH_info_gcm_gmult_4bit

	.rva	.LSEH_begin_gcm_ghash_4bit
	.rva	.LSEH_end_gcm_ghash_4bit
	.rva	.LSEH_info_gcm_ghash_4bit

	.rva	.LSEH_begin_gcm_init_clmul
	.rva	.LSEH_end_gcm_init_clmul
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_clmul
	.rva	.LSEH_end_gcm_ghash_clmul
	.rva	.LSEH_info_gcm_ghash_clmul
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_gcm_init_avx
	.rva	.LSEH_end_gcm_init_avx
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_avx
	.rva	.LSEH_end_gcm_ghash_avx
	.rva	.LSEH_info_gcm_ghash_clmul
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_gcm_gmult_4bit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lgmult_prologue,.Lgmult_epilogue	# HandlerData
.LSEH_info_gcm_ghash_4bit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lghash_prologue,.Lghash_epilogue	# HandlerData
.LSEH_info_gcm_init_clmul:
	.byte	0x01,0x08,0x03,0x00
	.byte	0x08,0x68,0x00,0x00	#movaps	0x00(rsp),xmm6
	.byte	0x04,0x22,0x00,0x00	#sub	rsp,0x18
.LSEH_info_gcm_ghash_clmul:
	.byte	0x01,0x33,0x16,0x00
	.byte	0x33,0xf8,0x09,0x00	#movaps 0x90(rsp),xmm15
	.byte	0x2e,0xe8,0x08,0x00	#movaps 0x80(rsp),xmm14
	.byte	0x29,0xd8,0x07,0x00	#movaps 0x70(rsp),xmm13
	.byte	0x24,0xc8,0x06,0x00	#movaps 0x60(rsp),xmm12
	.byte	0x1f,0xb8,0x05,0x00	#movaps 0x50(rsp),xmm11
	.byte	0x1a,0xa8,0x04,0x00	#movaps 0x40(rsp),xmm10
	.byte	0x15,0x98,0x03,0x00	#movaps 0x30(rsp),xmm9
	.byte	0x10,0x88,0x02,0x00	#movaps 0x20(rsp),xmm8
	.byte	0x0c,0x78,0x01,0x00	#movaps 0x10(rsp),xmm7
	.byte	0x08,0x68,0x00,0x00	#movaps 0x00(rsp),xmm6
	.byte	0x04,0x01,0x15,0x00	#sub	rsp,0xa8
___
}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;