#!/usr/bin/env perl

######################################################################
## Constant-time SSSE3 AES core implementation.
## version 0.1
##
## By Mike Hamburg (Stanford University), 2009
## Public domain.
##
## For details see http://shiftleft.org/papers/vector_aes/ and
## http://crypto.stanford.edu/vpaes/.

######################################################################
# September 2011.
#
# Interface to OpenSSL as "almost" drop-in replacement for
# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
# doesn't handle partial vectors (nor does it have to if called from
# EVP only). "Drop-in" implies that this module doesn't share key
# schedule structure with the original, nor does it make assumptions
# about its alignment...
#
# Performance summary. The aes-x86_64.pl column lists large-block CBC
# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
# byte processed with 128-bit key, and the vpaes-x86_64.pl column -
# [also large-block CBC] encrypt/decrypt.
#
#               aes-x86_64.pl           vpaes-x86_64.pl
#
# Core 2(**)    29.6/41.1/14.3          21.9/25.2(***)
# Nehalem       29.6/40.3/14.6          10.0/11.8
# Atom          57.3/74.2/32.1          60.9/77.2(***)
# Silvermont    52.7/64.0/19.5          48.8/60.8(***)
# Goldmont      38.9/49.0/17.8          10.6/12.6
#
# (*)   "Hyper-threading" in this context refers to cache shared among
#       multiple cores rather than to Intel HTT specifically. As the
#       vast majority of contemporary cores share cache, the slower
#       code path is commonplace. In other words, the
#       "with-hyper-threading-off" results are presented mostly for
#       reference purposes.
#
# (**)  "Core 2" refers to the initial 65nm design, a.k.a. Conroe.
#
# (***) The less impressive improvement on Core 2 and Atom is due to
#       slow pshufb, yet it's a respectable +36%/62% improvement on
#       Core 2 (as implied, over the "hyper-threading-safe" code path).
#
#                                               <appro@openssl.org>

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

$PREFIX="vpaes";
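
# The routines below are meant as drop-in replacements for their
# aes-x86_64.pl counterparts. For reference, a sketch of the intended C
# prototypes, written out here to mirror the usual AES_* signatures
# (illustrative, not copied from a header):
#
#       int  vpaes_set_encrypt_key(const unsigned char *userKey, int bits,
#                                  AES_KEY *key);
#       int  vpaes_set_decrypt_key(const unsigned char *userKey, int bits,
#                                  AES_KEY *key);
#       void vpaes_encrypt(const unsigned char *in, unsigned char *out,
#                          const AES_KEY *key);
#       void vpaes_decrypt(const unsigned char *in, unsigned char *out,
#                          const AES_KEY *key);
#       void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
#                              size_t length, const AES_KEY *key,
#                              unsigned char *ivp, const int enc);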

$code.=<<___;
.text

##
##  _vpaes_encrypt_core
##
##  AES-encrypt %xmm0.
##
##  Inputs:
##     %xmm0 = input
##     %xmm9-%xmm15 as in _vpaes_preheat
##    (%rdx) = scheduled keys
##
##  Output in %xmm0
##  Clobbers  %xmm1-%xmm5, %r9, %r10, %r11, %rax
##  Preserves %xmm6-%xmm8 so you get some local vectors
##
##
.type   _vpaes_encrypt_core,\@abi-omnipotent
.align  16
_vpaes_encrypt_core:
        mov     %rdx,   %r9
        mov     \$16,   %r11
        mov     240(%rdx),%eax
        movdqa  %xmm9,  %xmm1
        movdqa  .Lk_ipt(%rip), %xmm2    # iptlo
        pandn   %xmm0,  %xmm1
        movdqu  (%r9),  %xmm5           # round0 key
        psrld   \$4,    %xmm1
        pand    %xmm9,  %xmm0
        pshufb  %xmm0,  %xmm2
        movdqa  .Lk_ipt+16(%rip), %xmm0 # ipthi
        pshufb  %xmm1,  %xmm0
        pxor    %xmm5,  %xmm2
        add     \$16,   %r9
        pxor    %xmm2,  %xmm0
        lea     .Lk_mc_backward(%rip),%r10
        jmp     .Lenc_entry

.align  16
.Lenc_loop:
        # middle of middle round
        movdqa  %xmm13, %xmm4           # 4 : sb1u
        movdqa  %xmm12, %xmm0           # 0 : sb1t
        pshufb  %xmm2,  %xmm4           # 4 = sb1u
        pshufb  %xmm3,  %xmm0           # 0 = sb1t
        pxor    %xmm5,  %xmm4           # 4 = sb1u + k
        movdqa  %xmm15, %xmm5           # 4 : sb2u
        pxor    %xmm4,  %xmm0           # 0 = A
        movdqa  -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
        pshufb  %xmm2,  %xmm5           # 4 = sb2u
        movdqa  (%r11,%r10), %xmm4      # .Lk_mc_backward[]
        movdqa  %xmm14, %xmm2           # 2 : sb2t
        pshufb  %xmm3,  %xmm2           # 2 = sb2t
        movdqa  %xmm0,  %xmm3           # 3 = A
        pxor    %xmm5,  %xmm2           # 2 = 2A
        pshufb  %xmm1,  %xmm0           # 0 = B
        add     \$16,   %r9             # next key
        pxor    %xmm2,  %xmm0           # 0 = 2A+B
        pshufb  %xmm4,  %xmm3           # 3 = D
        add     \$16,   %r11            # next mc
        pxor    %xmm0,  %xmm3           # 3 = 2A+B+D
        pshufb  %xmm1,  %xmm0           # 0 = 2B+C
        and     \$0x30, %r11            # ... mod 4
        sub     \$1,%rax                # nr--
        pxor    %xmm3,  %xmm0           # 0 = 2A+3B+C+D

.Lenc_entry:
        # top of round
        movdqa  %xmm9,  %xmm1           # 1 : i
        movdqa  %xmm11, %xmm5           # 2 : a/k
        pandn   %xmm0,  %xmm1           # 1 = i<<4
        psrld   \$4,    %xmm1           # 1 = i
        pand    %xmm9,  %xmm0           # 0 = k
        pshufb  %xmm0,  %xmm5           # 2 = a/k
        movdqa  %xmm10, %xmm3           # 3 : 1/i
        pxor    %xmm1,  %xmm0           # 0 = j
        pshufb  %xmm1,  %xmm3           # 3 = 1/i
        movdqa  %xmm10, %xmm4           # 4 : 1/j
        pxor    %xmm5,  %xmm3           # 3 = iak = 1/i + a/k
        pshufb  %xmm0,  %xmm4           # 4 = 1/j
        movdqa  %xmm10, %xmm2           # 2 : 1/iak
        pxor    %xmm5,  %xmm4           # 4 = jak = 1/j + a/k
        pshufb  %xmm3,  %xmm2           # 2 = 1/iak
        movdqa  %xmm10, %xmm3           # 3 : 1/jak
        pxor    %xmm0,  %xmm2           # 2 = io
        pshufb  %xmm4,  %xmm3           # 3 = 1/jak
        movdqu  (%r9),  %xmm5
        pxor    %xmm1,  %xmm3           # 3 = jo
        jnz     .Lenc_loop

        # middle of last round
        movdqa  -0x60(%r10), %xmm4      # 3 : sbou      .Lk_sbo
        movdqa  -0x50(%r10), %xmm0      # 0 : sbot      .Lk_sbo+16
        pshufb  %xmm2,  %xmm4           # 4 = sbou
        pxor    %xmm5,  %xmm4           # 4 = sb1u + k
        pshufb  %xmm3,  %xmm0           # 0 = sb1t
        movdqa  0x40(%r11,%r10), %xmm1  # .Lk_sr[]
        pxor    %xmm4,  %xmm0           # 0 = A
        pshufb  %xmm1,  %xmm0
        ret
.size   _vpaes_encrypt_core,.-_vpaes_encrypt_core
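
##
##  Every sbox evaluation above follows one pattern, sketched here in
##  rough C (a model only; the tbl_* names are illustrative, the real
##  tables live in _vpaes_consts):
##
##      lo = x & 0x0F;                  /* pand with %xmm9          */
##      hi = (x >> 4) & 0x0F;           /* pandn+psrld with %xmm9   */
##      y  = tbl_lo[lo] ^ tbl_hi[hi];   /* two pshufb's + pxor      */
##
##  Two 16-byte pshufb tables stand in for a 256-byte lookup, so no
##  memory address ever depends on secret data, which is what makes
##  the implementation constant-time.
##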

##
##  Decryption core
##
##  Same API as encryption core.
##
.type   _vpaes_decrypt_core,\@abi-omnipotent
.align  16
_vpaes_decrypt_core:
        mov     %rdx,   %r9             # load key
        mov     240(%rdx),%eax
        movdqa  %xmm9,  %xmm1
        movdqa  .Lk_dipt(%rip), %xmm2   # iptlo
        pandn   %xmm0,  %xmm1
        mov     %rax,   %r11
        psrld   \$4,    %xmm1
        movdqu  (%r9),  %xmm5           # round0 key
        shl     \$4,    %r11
        pand    %xmm9,  %xmm0
        pshufb  %xmm0,  %xmm2
        movdqa  .Lk_dipt+16(%rip), %xmm0 # ipthi
        xor     \$0x30, %r11
        lea     .Lk_dsbd(%rip),%r10
        pshufb  %xmm1,  %xmm0
        and     \$0x30, %r11
        pxor    %xmm5,  %xmm2
        movdqa  .Lk_mc_forward+48(%rip), %xmm5
        pxor    %xmm2,  %xmm0
        add     \$16,   %r9
        add     %r10,   %r11
        jmp     .Ldec_entry

.align  16
.Ldec_loop:
##
##  Inverse mix columns
##
        movdqa  -0x20(%r10),%xmm4       # 4 : sb9u
        movdqa  -0x10(%r10),%xmm1       # 0 : sb9t
        pshufb  %xmm2,  %xmm4           # 4 = sb9u
        pshufb  %xmm3,  %xmm1           # 0 = sb9t
        pxor    %xmm4,  %xmm0
        movdqa  0x00(%r10),%xmm4        # 4 : sbdu
        pxor    %xmm1,  %xmm0           # 0 = ch
        movdqa  0x10(%r10),%xmm1        # 0 : sbdt

        pshufb  %xmm2,  %xmm4           # 4 = sbdu
        pshufb  %xmm5,  %xmm0           # MC ch
        pshufb  %xmm3,  %xmm1           # 0 = sbdt
        pxor    %xmm4,  %xmm0           # 4 = ch
        movdqa  0x20(%r10),%xmm4        # 4 : sbbu
        pxor    %xmm1,  %xmm0           # 0 = ch
        movdqa  0x30(%r10),%xmm1        # 0 : sbbt

        pshufb  %xmm2,  %xmm4           # 4 = sbbu
        pshufb  %xmm5,  %xmm0           # MC ch
        pshufb  %xmm3,  %xmm1           # 0 = sbbt
        pxor    %xmm4,  %xmm0           # 4 = ch
        movdqa  0x40(%r10),%xmm4        # 4 : sbeu
        pxor    %xmm1,  %xmm0           # 0 = ch
        movdqa  0x50(%r10),%xmm1        # 0 : sbet

        pshufb  %xmm2,  %xmm4           # 4 = sbeu
        pshufb  %xmm5,  %xmm0           # MC ch
        pshufb  %xmm3,  %xmm1           # 0 = sbet
        pxor    %xmm4,  %xmm0           # 4 = ch
        add     \$16, %r9               # next round key
        palignr \$12,   %xmm5,  %xmm5
        pxor    %xmm1,  %xmm0           # 0 = ch
        sub     \$1,%rax                # nr--

.Ldec_entry:
        # top of round
        movdqa  %xmm9,  %xmm1           # 1 : i
        pandn   %xmm0,  %xmm1           # 1 = i<<4
        movdqa  %xmm11, %xmm2           # 2 : a/k
        psrld   \$4,    %xmm1           # 1 = i
        pand    %xmm9,  %xmm0           # 0 = k
        pshufb  %xmm0,  %xmm2           # 2 = a/k
        movdqa  %xmm10, %xmm3           # 3 : 1/i
        pxor    %xmm1,  %xmm0           # 0 = j
        pshufb  %xmm1,  %xmm3           # 3 = 1/i
        movdqa  %xmm10, %xmm4           # 4 : 1/j
        pxor    %xmm2,  %xmm3           # 3 = iak = 1/i + a/k
        pshufb  %xmm0,  %xmm4           # 4 = 1/j
        pxor    %xmm2,  %xmm4           # 4 = jak = 1/j + a/k
        movdqa  %xmm10, %xmm2           # 2 : 1/iak
        pshufb  %xmm3,  %xmm2           # 2 = 1/iak
        movdqa  %xmm10, %xmm3           # 3 : 1/jak
        pxor    %xmm0,  %xmm2           # 2 = io
        pshufb  %xmm4,  %xmm3           # 3 = 1/jak
        movdqu  (%r9),  %xmm0
        pxor    %xmm1,  %xmm3           # 3 = jo
        jnz     .Ldec_loop

        # middle of last round
        movdqa  0x60(%r10), %xmm4       # 3 : sbou
        pshufb  %xmm2,  %xmm4           # 4 = sbou
        pxor    %xmm0,  %xmm4           # 4 = sb1u + k
        movdqa  0x70(%r10), %xmm0       # 0 : sbot
        movdqa  -0x160(%r11), %xmm2     # .Lk_sr-.Lk_dsbd=-0x160
        pshufb  %xmm3,  %xmm0           # 0 = sb1t
        pxor    %xmm4,  %xmm0           # 0 = A
        pshufb  %xmm2,  %xmm0
        ret
.size   _vpaes_decrypt_core,.-_vpaes_decrypt_core
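
##
##  Informal summary of the loop above, derived from the instruction
##  flow rather than taken from the original comments: with k the round
##  key loaded at .Ldec_entry and MC the rotation applied via %xmm5,
##  each pass of .Ldec_loop computes roughly
##
##      ch = sbe(x) ^ MC( sbb(x) ^ MC( sbd(x) ^ MC( sb9(x) ^ k ) ) )
##
##  i.e. inverse MixColumns is factored into the four sbox output
##  tables, with %xmm5 rotated once per round by the palignr.
##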

########################################################
##                                                    ##
##                 AES key schedule                   ##
##                                                    ##
########################################################
.type   _vpaes_schedule_core,\@abi-omnipotent
.align  16
_vpaes_schedule_core:
        # rdi = key
        # rsi = size in bits
        # rdx = buffer
        # rcx = direction.  0=encrypt, 1=decrypt

        call    _vpaes_preheat          # load the tables
        movdqa  .Lk_rcon(%rip), %xmm8   # load rcon
        movdqu  (%rdi), %xmm0           # load key (unaligned)

        # input transform
        movdqa  %xmm0,  %xmm3
        lea     .Lk_ipt(%rip), %r11
        call    _vpaes_schedule_transform
        movdqa  %xmm0,  %xmm7

        lea     .Lk_sr(%rip),%r10
        test    %rcx,   %rcx
        jnz     .Lschedule_am_decrypting

        # encrypting, output zeroth round key after transform
        movdqu  %xmm0,  (%rdx)
        jmp     .Lschedule_go

.Lschedule_am_decrypting:
        # decrypting, output zeroth round key after shiftrows
        movdqa  (%r8,%r10),%xmm1
        pshufb  %xmm1,  %xmm3
        movdqu  %xmm3,  (%rdx)
        xor     \$0x30, %r8

.Lschedule_go:
        cmp     \$192,  %esi
        ja      .Lschedule_256
        je      .Lschedule_192
        # 128: fall through

##
##  .Lschedule_128
##
##  128-bit specific part of key schedule.
##
##  This schedule is really simple, because all its parts
##  are accomplished by the subroutines.
##
.Lschedule_128:
        mov     \$10, %esi

.Loop_schedule_128:
        call    _vpaes_schedule_round
        dec     %rsi
        jz      .Lschedule_mangle_last
        call    _vpaes_schedule_mangle  # write output
        jmp     .Loop_schedule_128

##
##  .Lschedule_192
##
##  192-bit specific part of key schedule.
##
##  The main body of this schedule is the same as the 128-bit
##  schedule, but with more smearing. The long, high side is
##  stored in %xmm7 as before, and the short, low side is in
##  the high bits of %xmm6.
##
##  This schedule is somewhat nastier, however, because each
##  round produces 192 bits of key material, or 1.5 round keys.
##  Therefore, on each cycle we do 2 rounds and produce 3 round
##  keys.
##
.align  16
.Lschedule_192:
        movdqu  8(%rdi),%xmm0           # load key part 2 (very unaligned)
        call    _vpaes_schedule_transform       # input transform
        movdqa  %xmm0,  %xmm6           # save short part
        pxor    %xmm4,  %xmm4           # clear 4
        movhlps %xmm4,  %xmm6           # clobber low side with zeros
        mov     \$4,    %esi

.Loop_schedule_192:
        call    _vpaes_schedule_round
        palignr \$8,%xmm6,%xmm0
        call    _vpaes_schedule_mangle  # save key n
        call    _vpaes_schedule_192_smear
        call    _vpaes_schedule_mangle  # save key n+1
        call    _vpaes_schedule_round
        dec     %rsi
        jz      .Lschedule_mangle_last
        call    _vpaes_schedule_mangle  # save key n+2
        call    _vpaes_schedule_192_smear
        jmp     .Loop_schedule_192

##
##  .Lschedule_256
##
##  256-bit specific part of key schedule.
##
##  The structure here is very similar to the 128-bit
##  schedule, but with an additional "low side" in
##  %xmm6. The low side's rounds are the same as the
##  high side's, except no rcon and no rotation.
##
.align  16
.Lschedule_256:
        movdqu  16(%rdi),%xmm0          # load key part 2 (unaligned)
        call    _vpaes_schedule_transform       # input transform
        mov     \$7, %esi

.Loop_schedule_256:
        call    _vpaes_schedule_mangle  # output low result
        movdqa  %xmm0,  %xmm6           # save cur_lo in xmm6

        # high round
        call    _vpaes_schedule_round
        dec     %rsi
        jz      .Lschedule_mangle_last
        call    _vpaes_schedule_mangle

        # low round. swap xmm7 and xmm6
        pshufd  \$0xFF, %xmm0,  %xmm0
        movdqa  %xmm7,  %xmm5
        movdqa  %xmm6,  %xmm7
        call    _vpaes_schedule_low_round
        movdqa  %xmm5,  %xmm7

        jmp     .Loop_schedule_256

##
##  .Lschedule_mangle_last
##
##  Mangler for last round of key schedule
##  Mangles %xmm0
##      when encrypting, outputs out(%xmm0) ^ 63
##      when decrypting, outputs unskew(%xmm0)
##
##  Always called right before return... jumps to cleanup and exits
##
.align  16
.Lschedule_mangle_last:
        # schedule last round key from xmm0
        lea     .Lk_deskew(%rip),%r11   # prepare to deskew
        test    %rcx,   %rcx
        jnz     .Lschedule_mangle_last_dec

        # encrypting
        movdqa  (%r8,%r10),%xmm1
        pshufb  %xmm1,  %xmm0           # output permute
        lea     .Lk_opt(%rip),  %r11    # prepare to output transform
        add     \$32,   %rdx

.Lschedule_mangle_last_dec:
        add     \$-16,  %rdx
        pxor    .Lk_s63(%rip),  %xmm0
        call    _vpaes_schedule_transform # output transform
        movdqu  %xmm0,  (%rdx)          # save last key

        # cleanup
        pxor    %xmm0,  %xmm0
        pxor    %xmm1,  %xmm1
        pxor    %xmm2,  %xmm2
        pxor    %xmm3,  %xmm3
        pxor    %xmm4,  %xmm4
        pxor    %xmm5,  %xmm5
        pxor    %xmm6,  %xmm6
        pxor    %xmm7,  %xmm7
        ret
.size   _vpaes_schedule_core,.-_vpaes_schedule_core

##
##  _vpaes_schedule_192_smear
##
##  Smear the short, low side in the 192-bit key schedule.
##
##  Inputs:
##    %xmm7: high side, b  a  x  y
##    %xmm6:  low side, d  c  0  0
##    %xmm13: 0
##
##  Outputs:
##    %xmm6: b+c+d  b+c  0  0
##    %xmm0: b+c+d  b+c  b  a
##
.type   _vpaes_schedule_192_smear,\@abi-omnipotent
.align  16
_vpaes_schedule_192_smear:
        pshufd  \$0x80, %xmm6,  %xmm1   # d c 0 0 -> c 0 0 0
        pshufd  \$0xFE, %xmm7,  %xmm0   # b a _ _ -> b b b a
        pxor    %xmm1,  %xmm6           # -> c+d c 0 0
        pxor    %xmm1,  %xmm1
        pxor    %xmm0,  %xmm6           # -> b+c+d b+c b a
        movdqa  %xmm6,  %xmm0
        movhlps %xmm1,  %xmm6           # clobber low side with zeros
        ret
.size   _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
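
##
##  A note on the "smear" in _vpaes_schedule_low_round below: the two
##  pslldq/pxor pairs compute a prefix XOR over the four dwords of
##  %xmm7, i.e. informally
##
##      x ^= x << 32;  x ^= x << 64;    # dword i becomes d0^...^di
##
##  which is the word-folding recurrence of the FIPS-197 key expansion
##  (an informal restatement, sketched here for orientation).
##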

##
##  _vpaes_schedule_round
##
##  Runs one main round of the key schedule on %xmm0, %xmm7
##
##  Specifically, runs subbytes on the high dword of %xmm0
##  then rotates it by one byte and xors into the low dword of
##  %xmm7.
##
##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
##  next rcon.
##
##  Smears the dwords of %xmm7 by xoring the low into the
##  second low, result into third, result into highest.
##
##  Returns results in %xmm7 = %xmm0.
##  Clobbers %xmm1-%xmm4, %r11.
##
.type   _vpaes_schedule_round,\@abi-omnipotent
.align  16
_vpaes_schedule_round:
        # extract rcon from xmm8
        pxor    %xmm1,  %xmm1
        palignr \$15,   %xmm8,  %xmm1
        palignr \$15,   %xmm8,  %xmm8
        pxor    %xmm1,  %xmm7

        # rotate
        pshufd  \$0xFF, %xmm0,  %xmm0
        palignr \$1,    %xmm0,  %xmm0

        # fall through...

        # low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
        # smear xmm7
        movdqa  %xmm7,  %xmm1
        pslldq  \$4,    %xmm7
        pxor    %xmm1,  %xmm7
        movdqa  %xmm7,  %xmm1
        pslldq  \$8,    %xmm7
        pxor    %xmm1,  %xmm7
        pxor    .Lk_s63(%rip), %xmm7

        # subbytes
        movdqa  %xmm9,  %xmm1
        pandn   %xmm0,  %xmm1
        psrld   \$4,    %xmm1           # 1 = i
        pand    %xmm9,  %xmm0           # 0 = k
        movdqa  %xmm11, %xmm2           # 2 : a/k
        pshufb  %xmm0,  %xmm2           # 2 = a/k
        pxor    %xmm1,  %xmm0           # 0 = j
        movdqa  %xmm10, %xmm3           # 3 : 1/i
        pshufb  %xmm1,  %xmm3           # 3 = 1/i
        pxor    %xmm2,  %xmm3           # 3 = iak = 1/i + a/k
        movdqa  %xmm10, %xmm4           # 4 : 1/j
        pshufb  %xmm0,  %xmm4           # 4 = 1/j
        pxor    %xmm2,  %xmm4           # 4 = jak = 1/j + a/k
        movdqa  %xmm10, %xmm2           # 2 : 1/iak
        pshufb  %xmm3,  %xmm2           # 2 = 1/iak
        pxor    %xmm0,  %xmm2           # 2 = io
        movdqa  %xmm10, %xmm3           # 3 : 1/jak
        pshufb  %xmm4,  %xmm3           # 3 = 1/jak
        pxor    %xmm1,  %xmm3           # 3 = jo
        movdqa  %xmm13, %xmm4           # 4 : sbou
        pshufb  %xmm2,  %xmm4           # 4 = sbou
        movdqa  %xmm12, %xmm0           # 0 : sbot
        pshufb  %xmm3,  %xmm0           # 0 = sb1t
        pxor    %xmm4,  %xmm0           # 0 = sbox output

        # add in smeared stuff
        pxor    %xmm7,  %xmm0
        movdqa  %xmm0,  %xmm7
        ret
.size   _vpaes_schedule_round,.-_vpaes_schedule_round

##
##  _vpaes_schedule_transform
##
##  Linear-transform %xmm0 according to tables at (%r11)
##
##  Requires that %xmm9 = 0x0F0F... as in preheat
##  Output in %xmm0
##  Clobbers %xmm1, %xmm2
##
.type   _vpaes_schedule_transform,\@abi-omnipotent
.align  16
_vpaes_schedule_transform:
        movdqa  %xmm9,  %xmm1
        pandn   %xmm0,  %xmm1
        psrld   \$4,    %xmm1
        pand    %xmm9,  %xmm0
        movdqa  (%r11), %xmm2           # lo
        pshufb  %xmm0,  %xmm2
        movdqa  16(%r11), %xmm0         # hi
        pshufb  %xmm1,  %xmm0
        pxor    %xmm2,  %xmm0
        ret
.size   _vpaes_schedule_transform,.-_vpaes_schedule_transform

##
##  _vpaes_schedule_mangle
##
##  Mangle xmm0 from (basis-transformed) standard version
##  to our version.
##
##  On encrypt,
##      xor with 0x63
##      multiply by circulant 0,1,1,1
##      apply shiftrows transform
##
##  On decrypt,
##      xor with 0x63
##      multiply by "inverse mixcolumns" circulant E,B,D,9
##      deskew
##      apply shiftrows transform
##
##
##  Writes out to (%rdx), and increments or decrements it
##  Keeps track of round number mod 4 in %r8
##  Preserves xmm0
##  Clobbers xmm1-xmm5
##
.type   _vpaes_schedule_mangle,\@abi-omnipotent
.align  16
_vpaes_schedule_mangle:
        movdqa  %xmm0,  %xmm4   # save xmm0 for later
        movdqa  .Lk_mc_forward(%rip),%xmm5
        test    %rcx,   %rcx
        jnz     .Lschedule_mangle_dec

        # encrypting
        add     \$16,   %rdx
        pxor    .Lk_s63(%rip),%xmm4
        pshufb  %xmm5,  %xmm4
        movdqa  %xmm4,  %xmm3
        pshufb  %xmm5,  %xmm4
        pxor    %xmm4,  %xmm3
        pshufb  %xmm5,  %xmm4
        pxor    %xmm4,  %xmm3

        jmp     .Lschedule_mangle_both
.align  16
.Lschedule_mangle_dec:
        # inverse mix columns
        lea     .Lk_dksd(%rip),%r11
        movdqa  %xmm9,  %xmm1
        pandn   %xmm4,  %xmm1
        psrld   \$4,    %xmm1   # 1 = hi
        pand    %xmm9,  %xmm4   # 4 = lo

        movdqa  0x00(%r11), %xmm2
        pshufb  %xmm4,  %xmm2
        movdqa  0x10(%r11), %xmm3
        pshufb  %xmm1,  %xmm3
        pxor    %xmm2,  %xmm3
        pshufb  %xmm5,  %xmm3

        movdqa  0x20(%r11), %xmm2
        pshufb  %xmm4,  %xmm2
        pxor    %xmm3,  %xmm2
        movdqa  0x30(%r11), %xmm3
        pshufb  %xmm1,  %xmm3
        pxor    %xmm2,  %xmm3
        pshufb  %xmm5,  %xmm3

        movdqa  0x40(%r11), %xmm2
        pshufb  %xmm4,  %xmm2
        pxor    %xmm3,  %xmm2
        movdqa  0x50(%r11), %xmm3
        pshufb  %xmm1,  %xmm3
        pxor    %xmm2,  %xmm3
        pshufb  %xmm5,  %xmm3

        movdqa  0x60(%r11), %xmm2
        pshufb  %xmm4,  %xmm2
        pxor    %xmm3,  %xmm2
        movdqa  0x70(%r11), %xmm3
        pshufb  %xmm1,  %xmm3
        pxor    %xmm2,  %xmm3

        add     \$-16,  %rdx

.Lschedule_mangle_both:
        movdqa  (%r8,%r10),%xmm1
        pshufb  %xmm1,%xmm3
        add     \$-16,  %r8
        and     \$0x30, %r8
        movdqu  %xmm3,  (%rdx)
        ret
.size   _vpaes_schedule_mangle,.-_vpaes_schedule_mangle

#
# Interface to OpenSSL
#
.globl  ${PREFIX}_set_encrypt_key
.type   ${PREFIX}_set_encrypt_key,\@function,3
.align  16
${PREFIX}_set_encrypt_key:
___
$code.=<<___ if ($win64);
        lea     -0xb8(%rsp),%rsp
        movaps  %xmm6,0x10(%rsp)
        movaps  %xmm7,0x20(%rsp)
        movaps  %xmm8,0x30(%rsp)
        movaps  %xmm9,0x40(%rsp)
        movaps  %xmm10,0x50(%rsp)
        movaps  %xmm11,0x60(%rsp)
        movaps  %xmm12,0x70(%rsp)
        movaps  %xmm13,0x80(%rsp)
        movaps  %xmm14,0x90(%rsp)
        movaps  %xmm15,0xa0(%rsp)
.Lenc_key_body:
___
$code.=<<___;
        mov     %esi,%eax
        shr     \$5,%eax
        add     \$5,%eax
        mov     %eax,240(%rdx)  # AES_KEY->rounds = nbits/32+5;
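        # (nbits/32+5 works out to 9/11/13 for 128/192/256-bit keys: the
        # number of middle rounds; the cores handle the last round
        # separately.)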

        mov     \$0,%ecx
        mov     \$0x30,%r8d
        call    _vpaes_schedule_core
___
$code.=<<___ if ($win64);
        movaps  0x10(%rsp),%xmm6
        movaps  0x20(%rsp),%xmm7
        movaps  0x30(%rsp),%xmm8
        movaps  0x40(%rsp),%xmm9
        movaps  0x50(%rsp),%xmm10
        movaps  0x60(%rsp),%xmm11
        movaps  0x70(%rsp),%xmm12
        movaps  0x80(%rsp),%xmm13
        movaps  0x90(%rsp),%xmm14
        movaps  0xa0(%rsp),%xmm15
        lea     0xb8(%rsp),%rsp
.Lenc_key_epilogue:
___
$code.=<<___;
        xor     %eax,%eax
        ret
.size   ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key

.globl  ${PREFIX}_set_decrypt_key
.type   ${PREFIX}_set_decrypt_key,\@function,3
.align  16
${PREFIX}_set_decrypt_key:
___
$code.=<<___ if ($win64);
        lea     -0xb8(%rsp),%rsp
        movaps  %xmm6,0x10(%rsp)
        movaps  %xmm7,0x20(%rsp)
        movaps  %xmm8,0x30(%rsp)
        movaps  %xmm9,0x40(%rsp)
        movaps  %xmm10,0x50(%rsp)
        movaps  %xmm11,0x60(%rsp)
        movaps  %xmm12,0x70(%rsp)
        movaps  %xmm13,0x80(%rsp)
        movaps  %xmm14,0x90(%rsp)
        movaps  %xmm15,0xa0(%rsp)
.Ldec_key_body:
___
$code.=<<___;
        mov     %esi,%eax
        shr     \$5,%eax
        add     \$5,%eax
        mov     %eax,240(%rdx)  # AES_KEY->rounds = nbits/32+5;
        shl     \$4,%eax
        lea     16(%rdx,%rax),%rdx

        mov     \$1,%ecx
        mov     %esi,%r8d
        shr     \$1,%r8d
        and     \$32,%r8d
        xor     \$32,%r8d       # nbits==192?0:32
        call    _vpaes_schedule_core
___
$code.=<<___ if ($win64);
        movaps  0x10(%rsp),%xmm6
        movaps  0x20(%rsp),%xmm7
        movaps  0x30(%rsp),%xmm8
        movaps  0x40(%rsp),%xmm9
        movaps  0x50(%rsp),%xmm10
        movaps  0x60(%rsp),%xmm11
        movaps  0x70(%rsp),%xmm12
        movaps  0x80(%rsp),%xmm13
        movaps  0x90(%rsp),%xmm14
        movaps  0xa0(%rsp),%xmm15
        lea     0xb8(%rsp),%rsp
.Ldec_key_epilogue:
___
$code.=<<___;
        xor     %eax,%eax
        ret
.size   ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key

.globl  ${PREFIX}_encrypt
.type   ${PREFIX}_encrypt,\@function,3
.align  16
${PREFIX}_encrypt:
___
$code.=<<___ if ($win64);
        lea     -0xb8(%rsp),%rsp
        movaps  %xmm6,0x10(%rsp)
        movaps  %xmm7,0x20(%rsp)
        movaps  %xmm8,0x30(%rsp)
        movaps  %xmm9,0x40(%rsp)
        movaps  %xmm10,0x50(%rsp)
        movaps  %xmm11,0x60(%rsp)
        movaps  %xmm12,0x70(%rsp)
        movaps  %xmm13,0x80(%rsp)
        movaps  %xmm14,0x90(%rsp)
        movaps  %xmm15,0xa0(%rsp)
.Lenc_body:
___
$code.=<<___;
        movdqu  (%rdi),%xmm0
        call    _vpaes_preheat
        call    _vpaes_encrypt_core
        movdqu  %xmm0,(%rsi)
___
$code.=<<___ if ($win64);
        movaps  0x10(%rsp),%xmm6
        movaps  0x20(%rsp),%xmm7
        movaps  0x30(%rsp),%xmm8
        movaps  0x40(%rsp),%xmm9
        movaps  0x50(%rsp),%xmm10
        movaps  0x60(%rsp),%xmm11
        movaps  0x70(%rsp),%xmm12
        movaps  0x80(%rsp),%xmm13
        movaps  0x90(%rsp),%xmm14
        movaps  0xa0(%rsp),%xmm15
        lea     0xb8(%rsp),%rsp
.Lenc_epilogue:
___
$code.=<<___;
        ret
.size   ${PREFIX}_encrypt,.-${PREFIX}_encrypt

.globl  ${PREFIX}_decrypt
.type   ${PREFIX}_decrypt,\@function,3
.align  16
${PREFIX}_decrypt:
___
$code.=<<___ if ($win64);
        lea     -0xb8(%rsp),%rsp
        movaps  %xmm6,0x10(%rsp)
        movaps  %xmm7,0x20(%rsp)
        movaps  %xmm8,0x30(%rsp)
        movaps  %xmm9,0x40(%rsp)
        movaps  %xmm10,0x50(%rsp)
        movaps  %xmm11,0x60(%rsp)
        movaps  %xmm12,0x70(%rsp)
        movaps  %xmm13,0x80(%rsp)
        movaps  %xmm14,0x90(%rsp)
        movaps  %xmm15,0xa0(%rsp)
.Ldec_body:
___
$code.=<<___;
        movdqu  (%rdi),%xmm0
        call    _vpaes_preheat
        call    _vpaes_decrypt_core
        movdqu  %xmm0,(%rsi)
___
$code.=<<___ if ($win64);
        movaps  0x10(%rsp),%xmm6
        movaps  0x20(%rsp),%xmm7
        movaps  0x30(%rsp),%xmm8
        movaps  0x40(%rsp),%xmm9
        movaps  0x50(%rsp),%xmm10
        movaps  0x60(%rsp),%xmm11
        movaps  0x70(%rsp),%xmm12
        movaps  0x80(%rsp),%xmm13
        movaps  0x90(%rsp),%xmm14
        movaps  0xa0(%rsp),%xmm15
        lea     0xb8(%rsp),%rsp
.Ldec_epilogue:
___
$code.=<<___;
        ret
.size   ${PREFIX}_decrypt,.-${PREFIX}_decrypt
___
{
my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
#                       size_t length, const AES_KEY *key,
#                       unsigned char *ivp,const int enc);
$code.=<<___;
.globl  ${PREFIX}_cbc_encrypt
.type   ${PREFIX}_cbc_encrypt,\@function,6
.align  16
${PREFIX}_cbc_encrypt:
        xchg    $key,$len
___
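# The xchg above moves the key schedule pointer into %rdx, where the
# _vpaes_*_core routines expect it, and the length into %rcx; the
# Perl-level swap below renames $len/$key to match the new assignment.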
($len,$key)=($key,$len);
$code.=<<___;
        sub     \$16,$len
        jc      .Lcbc_abort
___
$code.=<<___ if ($win64);
        lea     -0xb8(%rsp),%rsp
        movaps  %xmm6,0x10(%rsp)
        movaps  %xmm7,0x20(%rsp)
        movaps  %xmm8,0x30(%rsp)
        movaps  %xmm9,0x40(%rsp)
        movaps  %xmm10,0x50(%rsp)
        movaps  %xmm11,0x60(%rsp)
        movaps  %xmm12,0x70(%rsp)
        movaps  %xmm13,0x80(%rsp)
        movaps  %xmm14,0x90(%rsp)
        movaps  %xmm15,0xa0(%rsp)
.Lcbc_body:
___
$code.=<<___;
        movdqu  ($ivp),%xmm6            # load IV
        sub     $inp,$out
        call    _vpaes_preheat
        cmp     \$0,${enc}d
        je      .Lcbc_dec_loop
        jmp     .Lcbc_enc_loop
.align  16
.Lcbc_enc_loop:
        movdqu  ($inp),%xmm0
        pxor    %xmm6,%xmm0
        call    _vpaes_encrypt_core
        movdqa  %xmm0,%xmm6
        movdqu  %xmm0,($out,$inp)
        lea     16($inp),$inp
        sub     \$16,$len
        jnc     .Lcbc_enc_loop
        jmp     .Lcbc_done
.align  16
.Lcbc_dec_loop:
        movdqu  ($inp),%xmm0
        movdqa  %xmm0,%xmm7
        call    _vpaes_decrypt_core
        pxor    %xmm6,%xmm0
        movdqa  %xmm7,%xmm6
        movdqu  %xmm0,($out,$inp)
        lea     16($inp),$inp
        sub     \$16,$len
        jnc     .Lcbc_dec_loop
.Lcbc_done:
        movdqu  %xmm6,($ivp)            # save IV
___
$code.=<<___ if ($win64);
        movaps  0x10(%rsp),%xmm6
        movaps  0x20(%rsp),%xmm7
        movaps  0x30(%rsp),%xmm8
        movaps  0x40(%rsp),%xmm9
        movaps  0x50(%rsp),%xmm10
        movaps  0x60(%rsp),%xmm11
        movaps  0x70(%rsp),%xmm12
        movaps  0x80(%rsp),%xmm13
        movaps  0x90(%rsp),%xmm14
        movaps  0xa0(%rsp),%xmm15
        lea     0xb8(%rsp),%rsp
.Lcbc_epilogue:
___
$code.=<<___;
.Lcbc_abort:
        ret
.size   ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
___
}
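
# For quick reference, the register map _vpaes_preheat establishes
# (collected here from the loads below):
#
#       %xmm9  = .Lk_s0F (0x0F nibble mask)
#       %xmm10 = .Lk_inv                %xmm11 = .Lk_inv+16
#       %xmm13 = .Lk_sb1 (sb1u)         %xmm12 = .Lk_sb1+16 (sb1t)
#       %xmm15 = .Lk_sb2 (sb2u)         %xmm14 = .Lk_sb2+16 (sb2t)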
$code.=<<___;
##
##  _vpaes_preheat
##
##  Fills register %r10 -> .aes_consts (so you can -fPIC)
##  and %xmm9-%xmm15 as specified below.
##
.type   _vpaes_preheat,\@abi-omnipotent
.align  16
_vpaes_preheat:
        lea     .Lk_s0F(%rip), %r10
        movdqa  -0x20(%r10), %xmm10     # .Lk_inv
        movdqa  -0x10(%r10), %xmm11     # .Lk_inv+16
        movdqa  0x00(%r10), %xmm9       # .Lk_s0F
        movdqa  0x30(%r10), %xmm13      # .Lk_sb1
        movdqa  0x40(%r10), %xmm12      # .Lk_sb1+16
        movdqa  0x50(%r10), %xmm15      # .Lk_sb2
        movdqa  0x60(%r10), %xmm14      # .Lk_sb2+16
        ret
.size   _vpaes_preheat,.-_vpaes_preheat
########################################################
##                                                    ##
##                     Constants                      ##
##                                                    ##
########################################################
.type   _vpaes_consts,\@object
.align  64
_vpaes_consts:
.Lk_inv:        # inv, inva
        .quad   0x0E05060F0D080180, 0x040703090A0B0C02
        .quad   0x01040A060F0B0780, 0x030D0E0C02050809

.Lk_s0F:        # s0F
        .quad   0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F

.Lk_ipt:        # input transform (lo, hi)
        .quad   0xC2B2E8985A2A7000, 0xCABAE09052227808
        .quad   0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81

.Lk_sb1:        # sb1u, sb1t
        .quad   0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
        .quad   0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.Lk_sb2:        # sb2u, sb2t
        .quad   0xE27A93C60B712400, 0x5EB7E955BC982FCD
        .quad   0x69EB88400AE12900, 0xC2A163C8AB82234A
.Lk_sbo:        # sbou, sbot
        .quad   0xD0D26D176FBDC700, 0x15AABF7AC502A878
        .quad   0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA

.Lk_mc_forward: # mc_forward
        .quad   0x0407060500030201, 0x0C0F0E0D080B0A09
        .quad   0x080B0A0904070605, 0x000302010C0F0E0D
        .quad   0x0C0F0E0D080B0A09, 0x0407060500030201
        .quad   0x000302010C0F0E0D, 0x080B0A0904070605

.Lk_mc_backward: # mc_backward
        .quad   0x0605040702010003, 0x0E0D0C0F0A09080B
        .quad   0x020100030E0D0C0F, 0x0A09080B06050407
        .quad   0x0E0D0C0F0A09080B, 0x0605040702010003
        .quad   0x0A09080B06050407, 0x020100030E0D0C0F

.Lk_sr:         # sr
        .quad   0x0706050403020100, 0x0F0E0D0C0B0A0908
        .quad   0x030E09040F0A0500, 0x0B06010C07020D08
        .quad   0x0F060D040B020900, 0x070E050C030A0108
        .quad   0x0B0E0104070A0D00, 0x0306090C0F020508

.Lk_rcon:       # rcon
        .quad   0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_s63:        # s63: all equal to 0x63 transformed
        .quad   0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B

.Lk_opt:        # output transform
        .quad   0xFF9F4929D6B66000, 0xF7974121DEBE6808
        .quad   0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0

.Lk_deskew:     # deskew tables: inverts the sbox's "skew"
        .quad   0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
        .quad   0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

##
##  Decryption stuff
##  Key schedule constants
##
.Lk_dksd:       # decryption key schedule: invskew x*D
        .quad   0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
        .quad   0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:       # decryption key schedule: invskew x*B
        .quad   0x9A4FCA1F8550D500, 0x03D653861CC94C99
        .quad   0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:       # decryption key schedule: invskew x*E + 0x63
        .quad   0xD5031CCA1FC9D600, 0x53859A4C994F5086
        .quad   0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:       # decryption key schedule: invskew x*9
        .quad   0xB6116FC87ED9A700, 0x4AED933482255BFC
        .quad   0x4576516227143300, 0x8BB89FACE9DAFDCE

##
##  Decryption stuff
##  Round function constants
##
.Lk_dipt:       # decryption input transform
        .quad   0x0F505B040B545F00, 0x154A411E114E451A
        .quad   0x86E383E660056500, 0x12771772F491F194

.Lk_dsb9:       # decryption sbox output *9*u, *9*t
        .quad   0x851C03539A86D600, 0xCAD51F504F994CC9
        .quad   0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:       # decryption sbox output *D*u, *D*t
        .quad   0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
        .quad   0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:       # decryption sbox output *B*u, *B*t
        .quad   0xD022649296B44200, 0x602646F6B0F2D404
        .quad   0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:       # decryption sbox output *E*u, *E*t
        .quad   0x46F2929626D4D000, 0x2242600464B4F6B0
        .quad   0x0C55A6CDFFAAC100, 0x9467F36B98593E32
.Lk_dsbo:       # decryption sbox final output
        .quad   0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
        .quad   0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.asciz  "Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
.align  64
.size   _vpaes_consts,.-_vpaes_consts
___

if ($win64) {
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#               CONTEXT *context,DISPATCHER_CONTEXT *disp)
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type   se_handler,\@abi-omnipotent
.align  16
se_handler:
        push    %rsi
        push    %rdi
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
        pushfq
        sub     \$64,%rsp

        mov     120($context),%rax      # pull context->Rax
        mov     248($context),%rbx      # pull context->Rip

        mov     8($disp),%rsi           # disp->ImageBase
        mov     56($disp),%r11          # disp->HandlerData

        mov     0(%r11),%r10d           # HandlerData[0]
        lea     (%rsi,%r10),%r10        # prologue label
        cmp     %r10,%rbx               # context->Rip<prologue label
        jb      .Lin_prologue

        mov     152($context),%rax      # pull context->Rsp

        mov     4(%r11),%r10d           # HandlerData[1]
        lea     (%rsi,%r10),%r10        # epilogue label
        cmp     %r10,%rbx               # context->Rip>=epilogue label
        jae     .Lin_prologue

        lea     16(%rax),%rsi           # %xmm save area
        lea     512($context),%rdi      # &context.Xmm6
        mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
        .long   0xa548f3fc              # cld; rep movsq
        lea     0xb8(%rax),%rax         # adjust stack pointer

.Lin_prologue:
        mov     8(%rax),%rdi
        mov     16(%rax),%rsi
        mov     %rax,152($context)      # restore context->Rsp
        mov     %rsi,168($context)      # restore context->Rsi
        mov     %rdi,176($context)      # restore context->Rdi

        mov     40($disp),%rdi          # disp->ContextRecord
        mov     $context,%rsi           # context
        mov     \$`1232/8`,%ecx         # sizeof(CONTEXT)
        .long   0xa548f3fc              # cld; rep movsq

        mov     $disp,%rsi
        xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
        mov     8(%rsi),%rdx            # arg2, disp->ImageBase
        mov     0(%rsi),%r8             # arg3, disp->ControlPc
        mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
        mov     40(%rsi),%r10           # disp->ContextRecord
        lea     56(%rsi),%r11           # &disp->HandlerData
        lea     24(%rsi),%r12           # &disp->EstablisherFrame
        mov     %r10,32(%rsp)           # arg5
        mov     %r11,40(%rsp)           # arg6
        mov     %r12,48(%rsp)           # arg7
        mov     %rcx,56(%rsp)           # arg8, (NULL)
        call    *__imp_RtlVirtualUnwind(%rip)

        mov     \$1,%eax                # ExceptionContinueSearch
        add     \$64,%rsp
        popfq
        pop     %r15
        pop     %r14
        pop     %r13
        pop     %r12
        pop     %rbp
        pop     %rbx
        pop     %rdi
        pop     %rsi
        ret
.size   se_handler,.-se_handler

.section        .pdata
.align  4
        .rva    .LSEH_begin_${PREFIX}_set_encrypt_key
        .rva    .LSEH_end_${PREFIX}_set_encrypt_key
        .rva    .LSEH_info_${PREFIX}_set_encrypt_key

        .rva    .LSEH_begin_${PREFIX}_set_decrypt_key
        .rva    .LSEH_end_${PREFIX}_set_decrypt_key
        .rva    .LSEH_info_${PREFIX}_set_decrypt_key

        .rva    .LSEH_begin_${PREFIX}_encrypt
        .rva    .LSEH_end_${PREFIX}_encrypt
        .rva    .LSEH_info_${PREFIX}_encrypt

        .rva    .LSEH_begin_${PREFIX}_decrypt
        .rva    .LSEH_end_${PREFIX}_decrypt
        .rva    .LSEH_info_${PREFIX}_decrypt

        .rva    .LSEH_begin_${PREFIX}_cbc_encrypt
        .rva    .LSEH_end_${PREFIX}_cbc_encrypt
        .rva    .LSEH_info_${PREFIX}_cbc_encrypt

.section        .xdata
.align  8
.LSEH_info_${PREFIX}_set_encrypt_key:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Lenc_key_body,.Lenc_key_epilogue       # HandlerData[]
.LSEH_info_${PREFIX}_set_decrypt_key:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Ldec_key_body,.Ldec_key_epilogue       # HandlerData[]
.LSEH_info_${PREFIX}_encrypt:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Lenc_body,.Lenc_epilogue               # HandlerData[]
.LSEH_info_${PREFIX}_decrypt:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Ldec_body,.Ldec_epilogue               # HandlerData[]
.LSEH_info_${PREFIX}_cbc_encrypt:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Lcbc_body,.Lcbc_epilogue               # HandlerData[]
___
}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;