      1 #!/usr/bin/env perl
      2 #
      3 # ====================================================================
      4 # Written by Andy Polyakov <appro (at] fy.chalmers.se> for the OpenSSL
      5 # project. The module is, however, dual licensed under OpenSSL and
      6 # CRYPTOGAMS licenses depending on where you obtain it. For further
      7 # details see http://www.openssl.org/~appro/cryptogams/.
      8 # ====================================================================
      9 #
# This module implements support for the Intel AES-NI extension. In
# the OpenSSL context it's used with the Intel engine, but it can also
# be used as a drop-in replacement for crypto/aes/asm/aes-x86_64.pl
# [see below for details].
     14 #
     15 # Performance.
     16 #
# Given the latency of the aes(enc|dec) instructions, asymptotic
# performance for non-parallelizable modes such as CBC encrypt is 3.75
# cycles per byte processed with a 128-bit key. And given their
# throughput, asymptotic performance for parallelizable modes is 1.25
# cycles per byte. Being an asymptotic limit, it's not something you
# commonly achieve in reality, but how close does one get? Below are
# results collected for different modes and block sizes. Pairs of
# numbers are for en-/decryption.
     25 #
     26 #	16-byte     64-byte     256-byte    1-KB        8-KB
     27 # ECB	4.25/4.25   1.38/1.38   1.28/1.28   1.26/1.26	1.26/1.26
     28 # CTR	5.42/5.42   1.92/1.92   1.44/1.44   1.28/1.28   1.26/1.26
     29 # CBC	4.38/4.43   4.15/1.43   4.07/1.32   4.07/1.29   4.06/1.28
     30 # CCM	5.66/9.42   4.42/5.41   4.16/4.40   4.09/4.15   4.06/4.07   
     31 # OFB	5.42/5.42   4.64/4.64   4.44/4.44   4.39/4.39   4.38/4.38
     32 # CFB	5.73/5.85   5.56/5.62   5.48/5.56   5.47/5.55   5.47/5.55
     33 #
# ECB, CTR, CBC and CCM results are free from EVP overhead. This means
# that the otherwise-used 'openssl speed -evp aes-128-??? -engine aesni
# [-decrypt]' will exhibit 10-15% worse results for smaller blocks. The
# results were collected with a specially crafted speed.c benchmark in
# order to compare them with the results reported in the "Intel
# Advanced Encryption Standard (AES) New Instruction Set" White Paper
# Revision 3.0 dated May 2010. All of the above results are
# consistently better. This module also provides better performance
# for block sizes smaller than 128 bytes at points *not* represented
# in the above table.
     43 #
     44 # Looking at the results for 8-KB buffer.
     45 #
# CFB and OFB results are far from the limit, because the
# implementation uses the "generic" CRYPTO_[c|o]fb128_encrypt
# interfaces relying on single-block aesni_encrypt, which is not the
# optimal way to go. CBC encrypt result is unexpectedly high and there
# is no documented explanation for it. Seemingly there is a small
# penalty for feeding the result back to the AES unit the way it's
# done in CBC mode. There is nothing one can do about it and the
# result appears optimal. CCM result is identical to CBC, because
# CBC-MAC is essentially CBC encrypt without saving output. CCM CTR
# "stays invisible," because it's neatly interleaved with CBC-MAC.
# This provides ~30% improvement over a "straightforward" CCM
# implementation with CTR and CBC-MAC performed disjointly.
# Parallelizable modes practically achieve the theoretical limit.
     59 #
     60 # Looking at how results vary with buffer size.
     61 #
# Curves are practically saturated at 1-KB buffer size. In most cases
# "256-byte" performance is >95%, and "64-byte" is ~90% of the "8-KB"
# one. The CTR curve doesn't follow this pattern and is the slowest-
# changing one, with the "256-byte" result being 87% of "8-KB." This
# is because the overhead in CTR mode is the most computationally
# intensive. Small-block CCM decrypt is slower than encrypt, because
# the first CTR and the last CBC-MAC iterations can't be interleaved.
     69 #
     70 # Results for 192- and 256-bit keys.
     71 #
# EVP-free results were observed to scale perfectly with the number of
# rounds for larger block sizes, i.e. the 192-bit result being 10/12
# times lower and the 256-bit one - 10/14. Well, in the CBC encrypt
# case the differences are a tad smaller, because the above-mentioned
# penalty biases all results by the same constant value. In a similar
# way function call overhead affects small-block performance, as well
# as OFB and CFB results. Differences are not large, the most common
# coefficients are 10/11.7 and 10/13.4 (as opposed to 10/12.0 and
# 10/14.0), but one observes even 10/11.2 and 10/12.4 (CTR, OFB,
# CFB)...
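#
# As an illustrative back-of-the-envelope check (derived from the table
# above, not a separate measurement): with perfect 10/12 and 10/14
# scaling, the 8-KB ECB figure of 1.26 cycles per byte for a 128-bit
# key would correspond to roughly 1.26*12/10 ~= 1.5 cycles per byte
# with a 192-bit key and 1.26*14/10 ~= 1.8 with a 256-bit one.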
     81 
     82 # January 2011
     83 #
     84 # While Westmere processor features 6 cycles latency for aes[enc|dec]
     85 # instructions, which can be scheduled every second cycle, Sandy
     86 # Bridge spends 8 cycles per instruction, but it can schedule them
     87 # every cycle. This means that code targeting Westmere would perform
     88 # suboptimally on Sandy Bridge. Therefore this update.
     89 #
# In addition, non-parallelizable CBC encrypt (as well as CCM) is
# optimized. The relative improvement might appear modest, 8% on
# Westmere, but in absolute terms it's 3.77 cycles per byte encrypted
# with a 128-bit key on Westmere, and 5.07 on Sandy Bridge. These
# numbers should be compared to the asymptotic limits of 3.75 for
# Westmere and 5.00 for Sandy Bridge. Actually, the fact that they get
# this close to the asymptotic limits is quite amazing. Indeed, the
# limit is calculated as latency times the number of rounds, 10 for a
# 128-bit key, divided by 16, the number of bytes in a block, or in
# other words
     99 # it accounts *solely* for aesenc instructions. But there are extra
    100 # instructions, and numbers so close to the asymptotic limits mean
    101 # that it's as if it takes as little as *one* additional cycle to
    102 # execute all of them. How is it possible? It is possible thanks to
    103 # out-of-order execution logic, which manages to overlap post-
    104 # processing of previous block, things like saving the output, with
    105 # actual encryption of current block, as well as pre-processing of
    106 # current block, things like fetching input and xor-ing it with
    107 # 0-round element of the key schedule, with actual encryption of
    108 # previous block. Keep this in mind...
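#
# To spell the quoted limits out as a worked equation (nothing beyond
# what is stated above):
#
#	limit [cycles/byte] = latency * rounds / 16
#	Westmere:	6 * 10 / 16 = 3.75
#	Sandy Bridge:	8 * 10 / 16 = 5.00
#
# with 10 rounds corresponding to a 128-bit key and 16 being the AES
# block size in bytes.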
    109 #
# For parallelizable modes, such as ECB, CBC decrypt and CTR, higher
# performance is achieved by interleaving instructions working on
# independent blocks. In that case the asymptotic limit for such modes
# can be obtained by dividing the above-mentioned numbers by the AES
# instructions' interleave factor. Westmere can execute at most 3
# instructions at a time, meaning that the optimal interleave factor
# is 3, and that's where the "magic" number of 1.25 comes from.
# "Optimal interleave factor" means that increasing the interleave
# factor does not improve performance. The formula has proven to
# reflect reality pretty well on Westmere... Sandy Bridge on the other
# hand can execute up to 8 AES instructions at a time, so how does
# varying the interleave factor affect performance? Here is a table
# for ECB (numbers are cycles per byte processed with a 128-bit key):
    123 #
    124 # instruction interleave factor		3x	6x	8x
    125 # theoretical asymptotic limit		1.67	0.83	0.625
    126 # measured performance for 8KB block	1.05	0.86	0.84
    127 #
    128 # "as if" interleave factor		4.7x	5.8x	6.0x
    129 #
    130 # Further data for other parallelizable modes:
    131 #
    132 # CBC decrypt				1.16	0.93	0.93
    133 # CTR					1.14	0.91	n/a
    134 #
# Well, given the 3x column it's probably inappropriate to call the
# limit asymptotic, if it can be surpassed, isn't it? What happens
# there? Rewind to the CBC paragraph for the answer. Yes, out-of-order
# execution magic is responsible for this. The processor overlaps not
# only the additional instructions with the AES ones, but even AES
# instructions processing adjacent triplets of independent blocks. In
# the 6x case the additional instructions still claim a
# disproportionately small amount of additional cycles, but in the 8x
# case the number of instructions must be a tad too high for the
# out-of-order logic to cope with, and the AES unit remains
# underutilized... As you can see 8x interleave is hardly justifiable,
# so there is no need to feel bad that 32-bit aesni-x86.pl utilizes 6x
# interleave because of limited register bank capacity.
    147 #
# Higher interleave factors do have a negative impact on Westmere
# performance. While for ECB mode it's a negligible ~1.5%, other
# parallelizable modes perform ~5% worse, which is outweighed by a
# ~25% improvement on Sandy Bridge. To balance the regression on
# Westmere, CTR mode was implemented with a 6x aesenc interleave
# factor.
    153 
    154 # April 2011
    155 #
    156 # Add aesni_xts_[en|de]crypt. Westmere spends 1.33 cycles processing
    157 # one byte out of 8KB with 128-bit key, Sandy Bridge - 0.97. Just like
    158 # in CTR mode AES instruction interleave factor was chosen to be 6x.
    159 
    160 $PREFIX="aesni";	# if $PREFIX is set to "AES", the script
    161 			# generates drop-in replacement for
    162 			# crypto/aes/asm/aes-x86_64.pl:-)
    163 
    164 $flavour = shift;
    165 $output  = shift;
    166 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
    167 
    168 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
    169 
    170 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    171 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
    172 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
    173 die "can't locate x86_64-xlate.pl";
    174 
    175 open OUT,"| \"$^X\" $xlate $flavour $output";
    176 *STDOUT=*OUT;
    177 
    178 $movkey = $PREFIX eq "aesni" ? "movups" : "movups";
    179 @_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
    180 		("%rdi","%rsi","%rdx","%rcx");	# Unix order
    181 
    182 $code=".text\n";
    183 
    184 $rounds="%eax";	# input to and changed by aesni_[en|de]cryptN !!!
    185 # this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
    186 $inp="%rdi";
    187 $out="%rsi";
    188 $len="%rdx";
    189 $key="%rcx";	# input to and changed by aesni_[en|de]cryptN !!!
    190 $ivp="%r8";	# cbc, ctr, ...
    191 
    192 $rnds_="%r10d";	# backup copy for $rounds
    193 $key_="%r11";	# backup copy for $key
    194 
    195 # %xmm register layout
    196 $rndkey0="%xmm0";	$rndkey1="%xmm1";
    197 $inout0="%xmm2";	$inout1="%xmm3";
    198 $inout2="%xmm4";	$inout3="%xmm5";
    199 $inout4="%xmm6";	$inout5="%xmm7";
    200 $inout6="%xmm8";	$inout7="%xmm9";
    201 
    202 $in2="%xmm6";		$in1="%xmm7";	# used in CBC decrypt, CTR, ...
    203 $in0="%xmm8";		$iv="%xmm9";
    204 
    206 # Inline version of internal aesni_[en|de]crypt1.
    207 #
    208 # Why folded loop? Because aes[enc|dec] is slow enough to accommodate
    209 # cycles which take care of loop variables...
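#
# For reference, a call like &aesni_generate1("enc",$key,$rounds) with
# the default $inout0 and no $ivec emits roughly the following (a
# sketch; $movkey is movups here), the final round being handled by
# aesenclast outside the folded loop:
#
#	$movkey	($key),$rndkey0			# round[0] key
#	$movkey	16($key),$rndkey1		# round[1] key
#	lea	32($key),$key
#	xorps	$rndkey0,$inout0		# round 0 = AddRoundKey
#	.Loop_enc1_<n>:
#	aesenc	$rndkey1,$inout0
#	dec	$rounds
#	$movkey	($key),$rndkey1			# fetch next round key
#	lea	16($key),$key
#	jnz	.Loop_enc1_<n>
#	aesenclast	$rndkey1,$inout0	# final round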
    210 { my $sn;
    211 sub aesni_generate1 {
    212 my ($p,$key,$rounds,$inout,$ivec)=@_;	$inout=$inout0 if (!defined($inout));
    213 ++$sn;
    214 $code.=<<___;
    215 	$movkey	($key),$rndkey0
    216 	$movkey	16($key),$rndkey1
    217 ___
    218 $code.=<<___ if (defined($ivec));
    219 	xorps	$rndkey0,$ivec
    220 	lea	32($key),$key
    221 	xorps	$ivec,$inout
    222 ___
    223 $code.=<<___ if (!defined($ivec));
    224 	lea	32($key),$key
    225 	xorps	$rndkey0,$inout
    226 ___
    227 $code.=<<___;
    228 .Loop_${p}1_$sn:
    229 	aes${p}	$rndkey1,$inout
    230 	dec	$rounds
    231 	$movkey	($key),$rndkey1
    232 	lea	16($key),$key
    233 	jnz	.Loop_${p}1_$sn	# loop body is 16 bytes
    234 	aes${p}last	$rndkey1,$inout
    235 ___
    236 }}
    237 # void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
    238 #
    239 { my ($inp,$out,$key) = @_4args;
    240 
    241 $code.=<<___;
    242 .globl	${PREFIX}_encrypt
    243 .type	${PREFIX}_encrypt,\@abi-omnipotent
    244 .align	16
    245 ${PREFIX}_encrypt:
    246 	movups	($inp),$inout0		# load input
    247 	mov	240($key),$rounds	# key->rounds
    248 ___
    249 	&aesni_generate1("enc",$key,$rounds);
    250 $code.=<<___;
    251 	movups	$inout0,($out)		# output
    252 	ret
    253 .size	${PREFIX}_encrypt,.-${PREFIX}_encrypt
    254 
    255 .globl	${PREFIX}_decrypt
    256 .type	${PREFIX}_decrypt,\@abi-omnipotent
    257 .align	16
    258 ${PREFIX}_decrypt:
    259 	movups	($inp),$inout0		# load input
    260 	mov	240($key),$rounds	# key->rounds
    261 ___
    262 	&aesni_generate1("dec",$key,$rounds);
    263 $code.=<<___;
    264 	movups	$inout0,($out)		# output
    265 	ret
    266 .size	${PREFIX}_decrypt, .-${PREFIX}_decrypt
    267 ___
    268 }
    269 
# _aesni_[en|de]cryptN are private interfaces, N denotes the interleave
# factor. Why were 3x subroutines originally used in loops? Even though
# aes[enc|dec] latency was originally 6, the instruction could be
# scheduled only every *2nd* cycle. Thus 3x interleave was the one
# providing optimal utilization, i.e. the subroutine's throughput is
# virtually the same as that of the non-interleaved subroutine [for up
# to 3 input blocks]. This is why it makes no sense to implement a 2x
# subroutine. aes[enc|dec] latency in the next processor generation is
# 8, but the instructions can be scheduled every cycle. Optimal
# interleave for the new processor is therefore 8x...
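#
# Put as a simple rule of thumb (nothing beyond what is stated above):
# the useful interleave factor is the instruction latency divided by
# its issue interval, i.e. 6/2 = 3 independent blocks in flight on
# Westmere and 8/1 = 8 on Sandy Bridge.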
    281 sub aesni_generate3 {
    282 my $dir=shift;
    283 # As already mentioned it takes in $key and $rounds, which are *not*
    284 # preserved. $inout[0-2] is cipher/clear text...
    285 $code.=<<___;
    286 .type	_aesni_${dir}rypt3,\@abi-omnipotent
    287 .align	16
    288 _aesni_${dir}rypt3:
    289 	$movkey	($key),$rndkey0
    290 	shr	\$1,$rounds
    291 	$movkey	16($key),$rndkey1
    292 	lea	32($key),$key
    293 	xorps	$rndkey0,$inout0
    294 	xorps	$rndkey0,$inout1
    295 	xorps	$rndkey0,$inout2
    296 	$movkey		($key),$rndkey0
    297 
    298 .L${dir}_loop3:
    299 	aes${dir}	$rndkey1,$inout0
    300 	aes${dir}	$rndkey1,$inout1
    301 	dec		$rounds
    302 	aes${dir}	$rndkey1,$inout2
    303 	$movkey		16($key),$rndkey1
    304 	aes${dir}	$rndkey0,$inout0
    305 	aes${dir}	$rndkey0,$inout1
    306 	lea		32($key),$key
    307 	aes${dir}	$rndkey0,$inout2
    308 	$movkey		($key),$rndkey0
    309 	jnz		.L${dir}_loop3
    310 
    311 	aes${dir}	$rndkey1,$inout0
    312 	aes${dir}	$rndkey1,$inout1
    313 	aes${dir}	$rndkey1,$inout2
    314 	aes${dir}last	$rndkey0,$inout0
    315 	aes${dir}last	$rndkey0,$inout1
    316 	aes${dir}last	$rndkey0,$inout2
    317 	ret
    318 .size	_aesni_${dir}rypt3,.-_aesni_${dir}rypt3
    319 ___
    320 }
# 4x interleave is implemented to improve small-block performance,
# most notably [and naturally] the 4-block case by ~30%. One can argue
# that one should have implemented 5x as well, but the improvement
# would be <20%, so it's not worth it...
    325 sub aesni_generate4 {
    326 my $dir=shift;
    327 # As already mentioned it takes in $key and $rounds, which are *not*
    328 # preserved. $inout[0-3] is cipher/clear text...
    329 $code.=<<___;
    330 .type	_aesni_${dir}rypt4,\@abi-omnipotent
    331 .align	16
    332 _aesni_${dir}rypt4:
    333 	$movkey	($key),$rndkey0
    334 	shr	\$1,$rounds
    335 	$movkey	16($key),$rndkey1
    336 	lea	32($key),$key
    337 	xorps	$rndkey0,$inout0
    338 	xorps	$rndkey0,$inout1
    339 	xorps	$rndkey0,$inout2
    340 	xorps	$rndkey0,$inout3
    341 	$movkey	($key),$rndkey0
    342 
    343 .L${dir}_loop4:
    344 	aes${dir}	$rndkey1,$inout0
    345 	aes${dir}	$rndkey1,$inout1
    346 	dec		$rounds
    347 	aes${dir}	$rndkey1,$inout2
    348 	aes${dir}	$rndkey1,$inout3
    349 	$movkey		16($key),$rndkey1
    350 	aes${dir}	$rndkey0,$inout0
    351 	aes${dir}	$rndkey0,$inout1
    352 	lea		32($key),$key
    353 	aes${dir}	$rndkey0,$inout2
    354 	aes${dir}	$rndkey0,$inout3
    355 	$movkey		($key),$rndkey0
    356 	jnz		.L${dir}_loop4
    357 
    358 	aes${dir}	$rndkey1,$inout0
    359 	aes${dir}	$rndkey1,$inout1
    360 	aes${dir}	$rndkey1,$inout2
    361 	aes${dir}	$rndkey1,$inout3
    362 	aes${dir}last	$rndkey0,$inout0
    363 	aes${dir}last	$rndkey0,$inout1
    364 	aes${dir}last	$rndkey0,$inout2
    365 	aes${dir}last	$rndkey0,$inout3
    366 	ret
    367 .size	_aesni_${dir}rypt4,.-_aesni_${dir}rypt4
    368 ___
    369 }
    370 sub aesni_generate6 {
    371 my $dir=shift;
    372 # As already mentioned it takes in $key and $rounds, which are *not*
    373 # preserved. $inout[0-5] is cipher/clear text...
    374 $code.=<<___;
    375 .type	_aesni_${dir}rypt6,\@abi-omnipotent
    376 .align	16
    377 _aesni_${dir}rypt6:
    378 	$movkey		($key),$rndkey0
    379 	shr		\$1,$rounds
    380 	$movkey		16($key),$rndkey1
    381 	lea		32($key),$key
    382 	xorps		$rndkey0,$inout0
    383 	pxor		$rndkey0,$inout1
    384 	aes${dir}	$rndkey1,$inout0
    385 	pxor		$rndkey0,$inout2
    386 	aes${dir}	$rndkey1,$inout1
    387 	pxor		$rndkey0,$inout3
    388 	aes${dir}	$rndkey1,$inout2
    389 	pxor		$rndkey0,$inout4
    390 	aes${dir}	$rndkey1,$inout3
    391 	pxor		$rndkey0,$inout5
    392 	dec		$rounds
    393 	aes${dir}	$rndkey1,$inout4
    394 	$movkey		($key),$rndkey0
    395 	aes${dir}	$rndkey1,$inout5
    396 	jmp		.L${dir}_loop6_enter
    397 .align	16
    398 .L${dir}_loop6:
    399 	aes${dir}	$rndkey1,$inout0
    400 	aes${dir}	$rndkey1,$inout1
    401 	dec		$rounds
    402 	aes${dir}	$rndkey1,$inout2
    403 	aes${dir}	$rndkey1,$inout3
    404 	aes${dir}	$rndkey1,$inout4
    405 	aes${dir}	$rndkey1,$inout5
    406 .L${dir}_loop6_enter:				# happens to be 16-byte aligned
    407 	$movkey		16($key),$rndkey1
    408 	aes${dir}	$rndkey0,$inout0
    409 	aes${dir}	$rndkey0,$inout1
    410 	lea		32($key),$key
    411 	aes${dir}	$rndkey0,$inout2
    412 	aes${dir}	$rndkey0,$inout3
    413 	aes${dir}	$rndkey0,$inout4
    414 	aes${dir}	$rndkey0,$inout5
    415 	$movkey		($key),$rndkey0
    416 	jnz		.L${dir}_loop6
    417 
    418 	aes${dir}	$rndkey1,$inout0
    419 	aes${dir}	$rndkey1,$inout1
    420 	aes${dir}	$rndkey1,$inout2
    421 	aes${dir}	$rndkey1,$inout3
    422 	aes${dir}	$rndkey1,$inout4
    423 	aes${dir}	$rndkey1,$inout5
    424 	aes${dir}last	$rndkey0,$inout0
    425 	aes${dir}last	$rndkey0,$inout1
    426 	aes${dir}last	$rndkey0,$inout2
    427 	aes${dir}last	$rndkey0,$inout3
    428 	aes${dir}last	$rndkey0,$inout4
    429 	aes${dir}last	$rndkey0,$inout5
    430 	ret
    431 .size	_aesni_${dir}rypt6,.-_aesni_${dir}rypt6
    432 ___
    433 }
    434 sub aesni_generate8 {
    435 my $dir=shift;
    436 # As already mentioned it takes in $key and $rounds, which are *not*
    437 # preserved. $inout[0-7] is cipher/clear text...
    438 $code.=<<___;
    439 .type	_aesni_${dir}rypt8,\@abi-omnipotent
    440 .align	16
    441 _aesni_${dir}rypt8:
    442 	$movkey		($key),$rndkey0
    443 	shr		\$1,$rounds
    444 	$movkey		16($key),$rndkey1
    445 	lea		32($key),$key
    446 	xorps		$rndkey0,$inout0
    447 	xorps		$rndkey0,$inout1
    448 	aes${dir}	$rndkey1,$inout0
    449 	pxor		$rndkey0,$inout2
    450 	aes${dir}	$rndkey1,$inout1
    451 	pxor		$rndkey0,$inout3
    452 	aes${dir}	$rndkey1,$inout2
    453 	pxor		$rndkey0,$inout4
    454 	aes${dir}	$rndkey1,$inout3
    455 	pxor		$rndkey0,$inout5
    456 	dec		$rounds
    457 	aes${dir}	$rndkey1,$inout4
    458 	pxor		$rndkey0,$inout6
    459 	aes${dir}	$rndkey1,$inout5
    460 	pxor		$rndkey0,$inout7
    461 	$movkey		($key),$rndkey0
    462 	aes${dir}	$rndkey1,$inout6
    463 	aes${dir}	$rndkey1,$inout7
    464 	$movkey		16($key),$rndkey1
    465 	jmp		.L${dir}_loop8_enter
    466 .align	16
    467 .L${dir}_loop8:
    468 	aes${dir}	$rndkey1,$inout0
    469 	aes${dir}	$rndkey1,$inout1
    470 	dec		$rounds
    471 	aes${dir}	$rndkey1,$inout2
    472 	aes${dir}	$rndkey1,$inout3
    473 	aes${dir}	$rndkey1,$inout4
    474 	aes${dir}	$rndkey1,$inout5
    475 	aes${dir}	$rndkey1,$inout6
    476 	aes${dir}	$rndkey1,$inout7
    477 	$movkey		16($key),$rndkey1
    478 .L${dir}_loop8_enter:				# happens to be 16-byte aligned
    479 	aes${dir}	$rndkey0,$inout0
    480 	aes${dir}	$rndkey0,$inout1
    481 	lea		32($key),$key
    482 	aes${dir}	$rndkey0,$inout2
    483 	aes${dir}	$rndkey0,$inout3
    484 	aes${dir}	$rndkey0,$inout4
    485 	aes${dir}	$rndkey0,$inout5
    486 	aes${dir}	$rndkey0,$inout6
    487 	aes${dir}	$rndkey0,$inout7
    488 	$movkey		($key),$rndkey0
    489 	jnz		.L${dir}_loop8
    490 
    491 	aes${dir}	$rndkey1,$inout0
    492 	aes${dir}	$rndkey1,$inout1
    493 	aes${dir}	$rndkey1,$inout2
    494 	aes${dir}	$rndkey1,$inout3
    495 	aes${dir}	$rndkey1,$inout4
    496 	aes${dir}	$rndkey1,$inout5
    497 	aes${dir}	$rndkey1,$inout6
    498 	aes${dir}	$rndkey1,$inout7
    499 	aes${dir}last	$rndkey0,$inout0
    500 	aes${dir}last	$rndkey0,$inout1
    501 	aes${dir}last	$rndkey0,$inout2
    502 	aes${dir}last	$rndkey0,$inout3
    503 	aes${dir}last	$rndkey0,$inout4
    504 	aes${dir}last	$rndkey0,$inout5
    505 	aes${dir}last	$rndkey0,$inout6
    506 	aes${dir}last	$rndkey0,$inout7
    507 	ret
    508 .size	_aesni_${dir}rypt8,.-_aesni_${dir}rypt8
    509 ___
    510 }
    511 &aesni_generate3("enc") if ($PREFIX eq "aesni");
    512 &aesni_generate3("dec");
    513 &aesni_generate4("enc") if ($PREFIX eq "aesni");
    514 &aesni_generate4("dec");
    515 &aesni_generate6("enc") if ($PREFIX eq "aesni");
    516 &aesni_generate6("dec");
    517 &aesni_generate8("enc") if ($PREFIX eq "aesni");
    518 &aesni_generate8("dec");
    519 
    521 if ($PREFIX eq "aesni") {
    522 ########################################################################
    523 # void aesni_ecb_encrypt (const void *in, void *out,
    524 #			  size_t length, const AES_KEY *key,
    525 #			  int enc);
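# Structurally: the bulk of the data is processed in 8-block chunks via
# _aesni_[en|de]crypt8, and a 1..7-block tail is dispatched to the
# 1/3/4/6/8-block subroutines; for the odd cases the surplus $inout
# registers are either zeroed or their output is simply not stored.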
    526 $code.=<<___;
    527 .globl	aesni_ecb_encrypt
    528 .type	aesni_ecb_encrypt,\@function,5
    529 .align	16
    530 aesni_ecb_encrypt:
    531 	and	\$-16,$len
    532 	jz	.Lecb_ret
    533 
    534 	mov	240($key),$rounds	# key->rounds
    535 	$movkey	($key),$rndkey0
    536 	mov	$key,$key_		# backup $key
    537 	mov	$rounds,$rnds_		# backup $rounds
    538 	test	%r8d,%r8d		# 5th argument
    539 	jz	.Lecb_decrypt
    540 #--------------------------- ECB ENCRYPT ------------------------------#
    541 	cmp	\$0x80,$len
    542 	jb	.Lecb_enc_tail
    543 
    544 	movdqu	($inp),$inout0
    545 	movdqu	0x10($inp),$inout1
    546 	movdqu	0x20($inp),$inout2
    547 	movdqu	0x30($inp),$inout3
    548 	movdqu	0x40($inp),$inout4
    549 	movdqu	0x50($inp),$inout5
    550 	movdqu	0x60($inp),$inout6
    551 	movdqu	0x70($inp),$inout7
    552 	lea	0x80($inp),$inp
    553 	sub	\$0x80,$len
    554 	jmp	.Lecb_enc_loop8_enter
    555 .align 16
    556 .Lecb_enc_loop8:
    557 	movups	$inout0,($out)
    558 	mov	$key_,$key		# restore $key
    559 	movdqu	($inp),$inout0
    560 	mov	$rnds_,$rounds		# restore $rounds
    561 	movups	$inout1,0x10($out)
    562 	movdqu	0x10($inp),$inout1
    563 	movups	$inout2,0x20($out)
    564 	movdqu	0x20($inp),$inout2
    565 	movups	$inout3,0x30($out)
    566 	movdqu	0x30($inp),$inout3
    567 	movups	$inout4,0x40($out)
    568 	movdqu	0x40($inp),$inout4
    569 	movups	$inout5,0x50($out)
    570 	movdqu	0x50($inp),$inout5
    571 	movups	$inout6,0x60($out)
    572 	movdqu	0x60($inp),$inout6
    573 	movups	$inout7,0x70($out)
    574 	lea	0x80($out),$out
    575 	movdqu	0x70($inp),$inout7
    576 	lea	0x80($inp),$inp
    577 .Lecb_enc_loop8_enter:
    578 
    579 	call	_aesni_encrypt8
    580 
    581 	sub	\$0x80,$len
    582 	jnc	.Lecb_enc_loop8
    583 
    584 	movups	$inout0,($out)
    585 	mov	$key_,$key		# restore $key
    586 	movups	$inout1,0x10($out)
    587 	mov	$rnds_,$rounds		# restore $rounds
    588 	movups	$inout2,0x20($out)
    589 	movups	$inout3,0x30($out)
    590 	movups	$inout4,0x40($out)
    591 	movups	$inout5,0x50($out)
    592 	movups	$inout6,0x60($out)
    593 	movups	$inout7,0x70($out)
    594 	lea	0x80($out),$out
    595 	add	\$0x80,$len
    596 	jz	.Lecb_ret
    597 
    598 .Lecb_enc_tail:
    599 	movups	($inp),$inout0
    600 	cmp	\$0x20,$len
    601 	jb	.Lecb_enc_one
    602 	movups	0x10($inp),$inout1
    603 	je	.Lecb_enc_two
    604 	movups	0x20($inp),$inout2
    605 	cmp	\$0x40,$len
    606 	jb	.Lecb_enc_three
    607 	movups	0x30($inp),$inout3
    608 	je	.Lecb_enc_four
    609 	movups	0x40($inp),$inout4
    610 	cmp	\$0x60,$len
    611 	jb	.Lecb_enc_five
    612 	movups	0x50($inp),$inout5
    613 	je	.Lecb_enc_six
    614 	movdqu	0x60($inp),$inout6
    615 	call	_aesni_encrypt8
    616 	movups	$inout0,($out)
    617 	movups	$inout1,0x10($out)
    618 	movups	$inout2,0x20($out)
    619 	movups	$inout3,0x30($out)
    620 	movups	$inout4,0x40($out)
    621 	movups	$inout5,0x50($out)
    622 	movups	$inout6,0x60($out)
    623 	jmp	.Lecb_ret
    624 .align	16
    625 .Lecb_enc_one:
    626 ___
    627 	&aesni_generate1("enc",$key,$rounds);
    628 $code.=<<___;
    629 	movups	$inout0,($out)
    630 	jmp	.Lecb_ret
    631 .align	16
    632 .Lecb_enc_two:
    633 	xorps	$inout2,$inout2
    634 	call	_aesni_encrypt3
    635 	movups	$inout0,($out)
    636 	movups	$inout1,0x10($out)
    637 	jmp	.Lecb_ret
    638 .align	16
    639 .Lecb_enc_three:
    640 	call	_aesni_encrypt3
    641 	movups	$inout0,($out)
    642 	movups	$inout1,0x10($out)
    643 	movups	$inout2,0x20($out)
    644 	jmp	.Lecb_ret
    645 .align	16
    646 .Lecb_enc_four:
    647 	call	_aesni_encrypt4
    648 	movups	$inout0,($out)
    649 	movups	$inout1,0x10($out)
    650 	movups	$inout2,0x20($out)
    651 	movups	$inout3,0x30($out)
    652 	jmp	.Lecb_ret
    653 .align	16
    654 .Lecb_enc_five:
    655 	xorps	$inout5,$inout5
    656 	call	_aesni_encrypt6
    657 	movups	$inout0,($out)
    658 	movups	$inout1,0x10($out)
    659 	movups	$inout2,0x20($out)
    660 	movups	$inout3,0x30($out)
    661 	movups	$inout4,0x40($out)
    662 	jmp	.Lecb_ret
    663 .align	16
    664 .Lecb_enc_six:
    665 	call	_aesni_encrypt6
    666 	movups	$inout0,($out)
    667 	movups	$inout1,0x10($out)
    668 	movups	$inout2,0x20($out)
    669 	movups	$inout3,0x30($out)
    670 	movups	$inout4,0x40($out)
    671 	movups	$inout5,0x50($out)
    672 	jmp	.Lecb_ret
    673 #--------------------------- ECB DECRYPT ------------------------------#
    675 .align	16
    676 .Lecb_decrypt:
    677 	cmp	\$0x80,$len
    678 	jb	.Lecb_dec_tail
    679 
    680 	movdqu	($inp),$inout0
    681 	movdqu	0x10($inp),$inout1
    682 	movdqu	0x20($inp),$inout2
    683 	movdqu	0x30($inp),$inout3
    684 	movdqu	0x40($inp),$inout4
    685 	movdqu	0x50($inp),$inout5
    686 	movdqu	0x60($inp),$inout6
    687 	movdqu	0x70($inp),$inout7
    688 	lea	0x80($inp),$inp
    689 	sub	\$0x80,$len
    690 	jmp	.Lecb_dec_loop8_enter
    691 .align 16
    692 .Lecb_dec_loop8:
    693 	movups	$inout0,($out)
    694 	mov	$key_,$key		# restore $key
    695 	movdqu	($inp),$inout0
    696 	mov	$rnds_,$rounds		# restore $rounds
    697 	movups	$inout1,0x10($out)
    698 	movdqu	0x10($inp),$inout1
    699 	movups	$inout2,0x20($out)
    700 	movdqu	0x20($inp),$inout2
    701 	movups	$inout3,0x30($out)
    702 	movdqu	0x30($inp),$inout3
    703 	movups	$inout4,0x40($out)
    704 	movdqu	0x40($inp),$inout4
    705 	movups	$inout5,0x50($out)
    706 	movdqu	0x50($inp),$inout5
    707 	movups	$inout6,0x60($out)
    708 	movdqu	0x60($inp),$inout6
    709 	movups	$inout7,0x70($out)
    710 	lea	0x80($out),$out
    711 	movdqu	0x70($inp),$inout7
    712 	lea	0x80($inp),$inp
    713 .Lecb_dec_loop8_enter:
    714 
    715 	call	_aesni_decrypt8
    716 
    717 	$movkey	($key_),$rndkey0
    718 	sub	\$0x80,$len
    719 	jnc	.Lecb_dec_loop8
    720 
    721 	movups	$inout0,($out)
    722 	mov	$key_,$key		# restore $key
    723 	movups	$inout1,0x10($out)
    724 	mov	$rnds_,$rounds		# restore $rounds
    725 	movups	$inout2,0x20($out)
    726 	movups	$inout3,0x30($out)
    727 	movups	$inout4,0x40($out)
    728 	movups	$inout5,0x50($out)
    729 	movups	$inout6,0x60($out)
    730 	movups	$inout7,0x70($out)
    731 	lea	0x80($out),$out
    732 	add	\$0x80,$len
    733 	jz	.Lecb_ret
    734 
    735 .Lecb_dec_tail:
    736 	movups	($inp),$inout0
    737 	cmp	\$0x20,$len
    738 	jb	.Lecb_dec_one
    739 	movups	0x10($inp),$inout1
    740 	je	.Lecb_dec_two
    741 	movups	0x20($inp),$inout2
    742 	cmp	\$0x40,$len
    743 	jb	.Lecb_dec_three
    744 	movups	0x30($inp),$inout3
    745 	je	.Lecb_dec_four
    746 	movups	0x40($inp),$inout4
    747 	cmp	\$0x60,$len
    748 	jb	.Lecb_dec_five
    749 	movups	0x50($inp),$inout5
    750 	je	.Lecb_dec_six
    751 	movups	0x60($inp),$inout6
    752 	$movkey	($key),$rndkey0
    753 	call	_aesni_decrypt8
    754 	movups	$inout0,($out)
    755 	movups	$inout1,0x10($out)
    756 	movups	$inout2,0x20($out)
    757 	movups	$inout3,0x30($out)
    758 	movups	$inout4,0x40($out)
    759 	movups	$inout5,0x50($out)
    760 	movups	$inout6,0x60($out)
    761 	jmp	.Lecb_ret
    762 .align	16
    763 .Lecb_dec_one:
    764 ___
    765 	&aesni_generate1("dec",$key,$rounds);
    766 $code.=<<___;
    767 	movups	$inout0,($out)
    768 	jmp	.Lecb_ret
    769 .align	16
    770 .Lecb_dec_two:
    771 	xorps	$inout2,$inout2
    772 	call	_aesni_decrypt3
    773 	movups	$inout0,($out)
    774 	movups	$inout1,0x10($out)
    775 	jmp	.Lecb_ret
    776 .align	16
    777 .Lecb_dec_three:
    778 	call	_aesni_decrypt3
    779 	movups	$inout0,($out)
    780 	movups	$inout1,0x10($out)
    781 	movups	$inout2,0x20($out)
    782 	jmp	.Lecb_ret
    783 .align	16
    784 .Lecb_dec_four:
    785 	call	_aesni_decrypt4
    786 	movups	$inout0,($out)
    787 	movups	$inout1,0x10($out)
    788 	movups	$inout2,0x20($out)
    789 	movups	$inout3,0x30($out)
    790 	jmp	.Lecb_ret
    791 .align	16
    792 .Lecb_dec_five:
    793 	xorps	$inout5,$inout5
    794 	call	_aesni_decrypt6
    795 	movups	$inout0,($out)
    796 	movups	$inout1,0x10($out)
    797 	movups	$inout2,0x20($out)
    798 	movups	$inout3,0x30($out)
    799 	movups	$inout4,0x40($out)
    800 	jmp	.Lecb_ret
    801 .align	16
    802 .Lecb_dec_six:
    803 	call	_aesni_decrypt6
    804 	movups	$inout0,($out)
    805 	movups	$inout1,0x10($out)
    806 	movups	$inout2,0x20($out)
    807 	movups	$inout3,0x30($out)
    808 	movups	$inout4,0x40($out)
    809 	movups	$inout5,0x50($out)
    810 
    811 .Lecb_ret:
    812 	ret
    813 .size	aesni_ecb_encrypt,.-aesni_ecb_encrypt
    814 ___
    815 
    817 {
    818 ######################################################################
    819 # void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
    820 #                         size_t blocks, const AES_KEY *key,
    821 #                         const char *ivec,char *cmac);
    822 #
    823 # Handles only complete blocks, operates on 64-bit counter and
    824 # does not update *ivec! Nor does it finalize CMAC value
    825 # (see engine/eng_aesni.c for details)
    826 #
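# Roughly, per processed block, the code below computes (E() denoting
# one AES encryption under $key and ctr the 64-bit counter block):
#
#	encrypt: out = in ^ E(ctr);  cmac = E(cmac ^ in);   ctr++
#	decrypt: out = in ^ E(ctr);  cmac = E(cmac ^ out);  ctr++
#
# interleaving the two E() computations wherever possible.
#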
    827 {
    828 my $cmac="%r9";	# 6th argument
    829 
    830 my $increment="%xmm6";
    831 my $bswap_mask="%xmm7";
    832 
    833 $code.=<<___;
    834 .globl	aesni_ccm64_encrypt_blocks
    835 .type	aesni_ccm64_encrypt_blocks,\@function,6
    836 .align	16
    837 aesni_ccm64_encrypt_blocks:
    838 ___
    839 $code.=<<___ if ($win64);
    840 	lea	-0x58(%rsp),%rsp
    841 	movaps	%xmm6,(%rsp)
    842 	movaps	%xmm7,0x10(%rsp)
    843 	movaps	%xmm8,0x20(%rsp)
    844 	movaps	%xmm9,0x30(%rsp)
    845 .Lccm64_enc_body:
    846 ___
    847 $code.=<<___;
    848 	mov	240($key),$rounds		# key->rounds
    849 	movdqu	($ivp),$iv
    850 	movdqa	.Lincrement64(%rip),$increment
    851 	movdqa	.Lbswap_mask(%rip),$bswap_mask
    852 
    853 	shr	\$1,$rounds
    854 	lea	0($key),$key_
    855 	movdqu	($cmac),$inout1
    856 	movdqa	$iv,$inout0
    857 	mov	$rounds,$rnds_
    858 	pshufb	$bswap_mask,$iv
    859 	jmp	.Lccm64_enc_outer
    860 .align	16
    861 .Lccm64_enc_outer:
    862 	$movkey	($key_),$rndkey0
    863 	mov	$rnds_,$rounds
    864 	movups	($inp),$in0			# load inp
    865 
    866 	xorps	$rndkey0,$inout0		# counter
    867 	$movkey	16($key_),$rndkey1
    868 	xorps	$in0,$rndkey0
    869 	lea	32($key_),$key
    870 	xorps	$rndkey0,$inout1		# cmac^=inp
    871 	$movkey	($key),$rndkey0
    872 
    873 .Lccm64_enc2_loop:
    874 	aesenc	$rndkey1,$inout0
    875 	dec	$rounds
    876 	aesenc	$rndkey1,$inout1
    877 	$movkey	16($key),$rndkey1
    878 	aesenc	$rndkey0,$inout0
    879 	lea	32($key),$key
    880 	aesenc	$rndkey0,$inout1
    881 	$movkey	0($key),$rndkey0
    882 	jnz	.Lccm64_enc2_loop
    883 	aesenc	$rndkey1,$inout0
    884 	aesenc	$rndkey1,$inout1
    885 	paddq	$increment,$iv
    886 	aesenclast	$rndkey0,$inout0
    887 	aesenclast	$rndkey0,$inout1
    888 
    889 	dec	$len
    890 	lea	16($inp),$inp
    891 	xorps	$inout0,$in0			# inp ^= E(iv)
    892 	movdqa	$iv,$inout0
    893 	movups	$in0,($out)			# save output
    894 	lea	16($out),$out
    895 	pshufb	$bswap_mask,$inout0
    896 	jnz	.Lccm64_enc_outer
    897 
    898 	movups	$inout1,($cmac)
    899 ___
    900 $code.=<<___ if ($win64);
    901 	movaps	(%rsp),%xmm6
    902 	movaps	0x10(%rsp),%xmm7
    903 	movaps	0x20(%rsp),%xmm8
    904 	movaps	0x30(%rsp),%xmm9
    905 	lea	0x58(%rsp),%rsp
    906 .Lccm64_enc_ret:
    907 ___
    908 $code.=<<___;
    909 	ret
    910 .size	aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
    911 ___
    912 ######################################################################
    913 $code.=<<___;
    914 .globl	aesni_ccm64_decrypt_blocks
    915 .type	aesni_ccm64_decrypt_blocks,\@function,6
    916 .align	16
    917 aesni_ccm64_decrypt_blocks:
    918 ___
    919 $code.=<<___ if ($win64);
    920 	lea	-0x58(%rsp),%rsp
    921 	movaps	%xmm6,(%rsp)
    922 	movaps	%xmm7,0x10(%rsp)
    923 	movaps	%xmm8,0x20(%rsp)
    924 	movaps	%xmm9,0x30(%rsp)
    925 .Lccm64_dec_body:
    926 ___
    927 $code.=<<___;
    928 	mov	240($key),$rounds		# key->rounds
    929 	movups	($ivp),$iv
    930 	movdqu	($cmac),$inout1
    931 	movdqa	.Lincrement64(%rip),$increment
    932 	movdqa	.Lbswap_mask(%rip),$bswap_mask
    933 
    934 	movaps	$iv,$inout0
    935 	mov	$rounds,$rnds_
    936 	mov	$key,$key_
    937 	pshufb	$bswap_mask,$iv
    938 ___
    939 	&aesni_generate1("enc",$key,$rounds);
    940 $code.=<<___;
    941 	movups	($inp),$in0			# load inp
    942 	paddq	$increment,$iv
    943 	lea	16($inp),$inp
    944 	jmp	.Lccm64_dec_outer
    945 .align	16
    946 .Lccm64_dec_outer:
    947 	xorps	$inout0,$in0			# inp ^= E(iv)
    948 	movdqa	$iv,$inout0
    949 	mov	$rnds_,$rounds
    950 	movups	$in0,($out)			# save output
    951 	lea	16($out),$out
    952 	pshufb	$bswap_mask,$inout0
    953 
    954 	sub	\$1,$len
    955 	jz	.Lccm64_dec_break
    956 
    957 	$movkey	($key_),$rndkey0
    958 	shr	\$1,$rounds
    959 	$movkey	16($key_),$rndkey1
    960 	xorps	$rndkey0,$in0
    961 	lea	32($key_),$key
    962 	xorps	$rndkey0,$inout0
    963 	xorps	$in0,$inout1			# cmac^=out
    964 	$movkey	($key),$rndkey0
    965 
    966 .Lccm64_dec2_loop:
    967 	aesenc	$rndkey1,$inout0
    968 	dec	$rounds
    969 	aesenc	$rndkey1,$inout1
    970 	$movkey	16($key),$rndkey1
    971 	aesenc	$rndkey0,$inout0
    972 	lea	32($key),$key
    973 	aesenc	$rndkey0,$inout1
    974 	$movkey	0($key),$rndkey0
    975 	jnz	.Lccm64_dec2_loop
    976 	movups	($inp),$in0			# load inp
    977 	paddq	$increment,$iv
    978 	aesenc	$rndkey1,$inout0
    979 	aesenc	$rndkey1,$inout1
    980 	lea	16($inp),$inp
    981 	aesenclast	$rndkey0,$inout0
    982 	aesenclast	$rndkey0,$inout1
    983 	jmp	.Lccm64_dec_outer
    984 
    985 .align	16
    986 .Lccm64_dec_break:
    987 	#xorps	$in0,$inout1			# cmac^=out
    988 ___
    989 	&aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
    990 $code.=<<___;
    991 	movups	$inout1,($cmac)
    992 ___
    993 $code.=<<___ if ($win64);
    994 	movaps	(%rsp),%xmm6
    995 	movaps	0x10(%rsp),%xmm7
    996 	movaps	0x20(%rsp),%xmm8
    997 	movaps	0x30(%rsp),%xmm9
    998 	lea	0x58(%rsp),%rsp
    999 .Lccm64_dec_ret:
   1000 ___
   1001 $code.=<<___;
   1002 	ret
   1003 .size	aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
   1004 ___
   1005 }
   1007 ######################################################################
   1008 # void aesni_ctr32_encrypt_blocks (const void *in, void *out,
   1009 #                         size_t blocks, const AES_KEY *key,
   1010 #                         const char *ivec);
   1011 #
   1012 # Handles only complete blocks, operates on 32-bit counter and
   1013 # does not update *ivec! (see engine/eng_aesni.c for details)
   1014 #
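# A sketch of the approach taken below: the big-endian 32-bit counter
# is extracted from *ivec and expanded into two vectors of consecutive
# counter values (n+0..n+2 and n+3..n+5), which are byte-swapped back
# and merged with the counter-less ivec so that six counter blocks can
# be encrypted per iteration; each output block is then
# input[i] ^ E(key, counter_block[i]).
#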
   1015 {
   1016 my $reserved = $win64?0:-0x28;
   1017 my ($in0,$in1,$in2,$in3)=map("%xmm$_",(8..11));
   1018 my ($iv0,$iv1,$ivec)=("%xmm12","%xmm13","%xmm14");
   1019 my $bswap_mask="%xmm15";
   1020 
   1021 $code.=<<___;
   1022 .globl	aesni_ctr32_encrypt_blocks
   1023 .type	aesni_ctr32_encrypt_blocks,\@function,5
   1024 .align	16
   1025 aesni_ctr32_encrypt_blocks:
   1026 ___
   1027 $code.=<<___ if ($win64);
   1028 	lea	-0xc8(%rsp),%rsp
   1029 	movaps	%xmm6,0x20(%rsp)
   1030 	movaps	%xmm7,0x30(%rsp)
   1031 	movaps	%xmm8,0x40(%rsp)
   1032 	movaps	%xmm9,0x50(%rsp)
   1033 	movaps	%xmm10,0x60(%rsp)
   1034 	movaps	%xmm11,0x70(%rsp)
   1035 	movaps	%xmm12,0x80(%rsp)
   1036 	movaps	%xmm13,0x90(%rsp)
   1037 	movaps	%xmm14,0xa0(%rsp)
   1038 	movaps	%xmm15,0xb0(%rsp)
   1039 .Lctr32_body:
   1040 ___
   1041 $code.=<<___;
   1042 	cmp	\$1,$len
   1043 	je	.Lctr32_one_shortcut
   1044 
   1045 	movdqu	($ivp),$ivec
   1046 	movdqa	.Lbswap_mask(%rip),$bswap_mask
   1047 	xor	$rounds,$rounds
   1048 	pextrd	\$3,$ivec,$rnds_		# pull 32-bit counter
   1049 	pinsrd	\$3,$rounds,$ivec		# wipe 32-bit counter
   1050 
   1051 	mov	240($key),$rounds		# key->rounds
   1052 	bswap	$rnds_
   1053 	pxor	$iv0,$iv0			# vector of 3 32-bit counters
   1054 	pxor	$iv1,$iv1			# vector of 3 32-bit counters
   1055 	pinsrd	\$0,$rnds_,$iv0
   1056 	lea	3($rnds_),$key_
   1057 	pinsrd	\$0,$key_,$iv1
   1058 	inc	$rnds_
   1059 	pinsrd	\$1,$rnds_,$iv0
   1060 	inc	$key_
   1061 	pinsrd	\$1,$key_,$iv1
   1062 	inc	$rnds_
   1063 	pinsrd	\$2,$rnds_,$iv0
   1064 	inc	$key_
   1065 	pinsrd	\$2,$key_,$iv1
   1066 	movdqa	$iv0,$reserved(%rsp)
   1067 	pshufb	$bswap_mask,$iv0
   1068 	movdqa	$iv1,`$reserved+0x10`(%rsp)
   1069 	pshufb	$bswap_mask,$iv1
   1070 
   1071 	pshufd	\$`3<<6`,$iv0,$inout0		# place counter to upper dword
   1072 	pshufd	\$`2<<6`,$iv0,$inout1
   1073 	pshufd	\$`1<<6`,$iv0,$inout2
   1074 	cmp	\$6,$len
   1075 	jb	.Lctr32_tail
   1076 	shr	\$1,$rounds
   1077 	mov	$key,$key_			# backup $key
   1078 	mov	$rounds,$rnds_			# backup $rounds
   1079 	sub	\$6,$len
   1080 	jmp	.Lctr32_loop6
   1081 
   1082 .align	16
   1083 .Lctr32_loop6:
   1084 	pshufd	\$`3<<6`,$iv1,$inout3
   1085 	por	$ivec,$inout0			# merge counter-less ivec
   1086 	 $movkey	($key_),$rndkey0
   1087 	pshufd	\$`2<<6`,$iv1,$inout4
   1088 	por	$ivec,$inout1
   1089 	 $movkey	16($key_),$rndkey1
   1090 	pshufd	\$`1<<6`,$iv1,$inout5
   1091 	por	$ivec,$inout2
   1092 	por	$ivec,$inout3
   1093 	 xorps		$rndkey0,$inout0
   1094 	por	$ivec,$inout4
   1095 	por	$ivec,$inout5
   1096 
   1097 	# inline _aesni_encrypt6 and interleave last rounds
   1098 	# with own code...
   1099 
   1100 	pxor		$rndkey0,$inout1
   1101 	aesenc		$rndkey1,$inout0
   1102 	lea		32($key_),$key
   1103 	pxor		$rndkey0,$inout2
   1104 	aesenc		$rndkey1,$inout1
   1105 	 movdqa		.Lincrement32(%rip),$iv1
   1106 	pxor		$rndkey0,$inout3
   1107 	aesenc		$rndkey1,$inout2
   1108 	 movdqa		$reserved(%rsp),$iv0
   1109 	pxor		$rndkey0,$inout4
   1110 	aesenc		$rndkey1,$inout3
   1111 	pxor		$rndkey0,$inout5
   1112 	$movkey		($key),$rndkey0
   1113 	dec		$rounds
   1114 	aesenc		$rndkey1,$inout4
   1115 	aesenc		$rndkey1,$inout5
   1116 	jmp		.Lctr32_enc_loop6_enter
   1117 .align	16
   1118 .Lctr32_enc_loop6:
   1119 	aesenc		$rndkey1,$inout0
   1120 	aesenc		$rndkey1,$inout1
   1121 	dec		$rounds
   1122 	aesenc		$rndkey1,$inout2
   1123 	aesenc		$rndkey1,$inout3
   1124 	aesenc		$rndkey1,$inout4
   1125 	aesenc		$rndkey1,$inout5
   1126 .Lctr32_enc_loop6_enter:
   1127 	$movkey		16($key),$rndkey1
   1128 	aesenc		$rndkey0,$inout0
   1129 	aesenc		$rndkey0,$inout1
   1130 	lea		32($key),$key
   1131 	aesenc		$rndkey0,$inout2
   1132 	aesenc		$rndkey0,$inout3
   1133 	aesenc		$rndkey0,$inout4
   1134 	aesenc		$rndkey0,$inout5
   1135 	$movkey		($key),$rndkey0
   1136 	jnz		.Lctr32_enc_loop6
   1137 
   1138 	aesenc		$rndkey1,$inout0
   1139 	 paddd		$iv1,$iv0		# increment counter vector
   1140 	aesenc		$rndkey1,$inout1
   1141 	 paddd		`$reserved+0x10`(%rsp),$iv1
   1142 	aesenc		$rndkey1,$inout2
   1143 	 movdqa		$iv0,$reserved(%rsp)	# save counter vector
   1144 	aesenc		$rndkey1,$inout3
   1145 	 movdqa		$iv1,`$reserved+0x10`(%rsp)
   1146 	aesenc		$rndkey1,$inout4
   1147 	 pshufb		$bswap_mask,$iv0	# byte swap
   1148 	aesenc		$rndkey1,$inout5
   1149 	 pshufb		$bswap_mask,$iv1
   1150 
   1151 	aesenclast	$rndkey0,$inout0
   1152 	 movups		($inp),$in0		# load input
   1153 	aesenclast	$rndkey0,$inout1
   1154 	 movups		0x10($inp),$in1
   1155 	aesenclast	$rndkey0,$inout2
   1156 	 movups		0x20($inp),$in2
   1157 	aesenclast	$rndkey0,$inout3
   1158 	 movups		0x30($inp),$in3
   1159 	aesenclast	$rndkey0,$inout4
   1160 	 movups		0x40($inp),$rndkey1
   1161 	aesenclast	$rndkey0,$inout5
   1162 	 movups		0x50($inp),$rndkey0
   1163 	 lea	0x60($inp),$inp
   1164 
   1165 	xorps	$inout0,$in0			# xor
   1166 	 pshufd	\$`3<<6`,$iv0,$inout0
   1167 	xorps	$inout1,$in1
   1168 	 pshufd	\$`2<<6`,$iv0,$inout1
   1169 	movups	$in0,($out)			# store output
   1170 	xorps	$inout2,$in2
   1171 	 pshufd	\$`1<<6`,$iv0,$inout2
   1172 	movups	$in1,0x10($out)
   1173 	xorps	$inout3,$in3
   1174 	movups	$in2,0x20($out)
   1175 	xorps	$inout4,$rndkey1
   1176 	movups	$in3,0x30($out)
   1177 	xorps	$inout5,$rndkey0
   1178 	movups	$rndkey1,0x40($out)
   1179 	movups	$rndkey0,0x50($out)
   1180 	lea	0x60($out),$out
   1181 	mov	$rnds_,$rounds
   1182 	sub	\$6,$len
   1183 	jnc	.Lctr32_loop6
   1184 
   1185 	add	\$6,$len
   1186 	jz	.Lctr32_done
   1187 	mov	$key_,$key			# restore $key
   1188 	lea	1($rounds,$rounds),$rounds	# restore original value
   1189 
   1190 .Lctr32_tail:
   1191 	por	$ivec,$inout0
   1192 	movups	($inp),$in0
   1193 	cmp	\$2,$len
   1194 	jb	.Lctr32_one
   1195 
   1196 	por	$ivec,$inout1
   1197 	movups	0x10($inp),$in1
   1198 	je	.Lctr32_two
   1199 
   1200 	pshufd	\$`3<<6`,$iv1,$inout3
   1201 	por	$ivec,$inout2
   1202 	movups	0x20($inp),$in2
   1203 	cmp	\$4,$len
   1204 	jb	.Lctr32_three
   1205 
   1206 	pshufd	\$`2<<6`,$iv1,$inout4
   1207 	por	$ivec,$inout3
   1208 	movups	0x30($inp),$in3
   1209 	je	.Lctr32_four
   1210 
   1211 	por	$ivec,$inout4
   1212 	xorps	$inout5,$inout5
   1213 
   1214 	call	_aesni_encrypt6
   1215 
   1216 	movups	0x40($inp),$rndkey1
   1217 	xorps	$inout0,$in0
   1218 	xorps	$inout1,$in1
   1219 	movups	$in0,($out)
   1220 	xorps	$inout2,$in2
   1221 	movups	$in1,0x10($out)
   1222 	xorps	$inout3,$in3
   1223 	movups	$in2,0x20($out)
   1224 	xorps	$inout4,$rndkey1
   1225 	movups	$in3,0x30($out)
   1226 	movups	$rndkey1,0x40($out)
   1227 	jmp	.Lctr32_done
   1228 
   1229 .align	16
   1230 .Lctr32_one_shortcut:
   1231 	movups	($ivp),$inout0
   1232 	movups	($inp),$in0
   1233 	mov	240($key),$rounds		# key->rounds
   1234 .Lctr32_one:
   1235 ___
   1236 	&aesni_generate1("enc",$key,$rounds);
   1237 $code.=<<___;
   1238 	xorps	$inout0,$in0
   1239 	movups	$in0,($out)
   1240 	jmp	.Lctr32_done
   1241 
   1242 .align	16
   1243 .Lctr32_two:
   1244 	xorps	$inout2,$inout2
   1245 	call	_aesni_encrypt3
   1246 	xorps	$inout0,$in0
   1247 	xorps	$inout1,$in1
   1248 	movups	$in0,($out)
   1249 	movups	$in1,0x10($out)
   1250 	jmp	.Lctr32_done
   1251 
   1252 .align	16
   1253 .Lctr32_three:
   1254 	call	_aesni_encrypt3
   1255 	xorps	$inout0,$in0
   1256 	xorps	$inout1,$in1
   1257 	movups	$in0,($out)
   1258 	xorps	$inout2,$in2
   1259 	movups	$in1,0x10($out)
   1260 	movups	$in2,0x20($out)
   1261 	jmp	.Lctr32_done
   1262 
   1263 .align	16
   1264 .Lctr32_four:
   1265 	call	_aesni_encrypt4
   1266 	xorps	$inout0,$in0
   1267 	xorps	$inout1,$in1
   1268 	movups	$in0,($out)
   1269 	xorps	$inout2,$in2
   1270 	movups	$in1,0x10($out)
   1271 	xorps	$inout3,$in3
   1272 	movups	$in2,0x20($out)
   1273 	movups	$in3,0x30($out)
   1274 
   1275 .Lctr32_done:
   1276 ___
   1277 $code.=<<___ if ($win64);
   1278 	movaps	0x20(%rsp),%xmm6
   1279 	movaps	0x30(%rsp),%xmm7
   1280 	movaps	0x40(%rsp),%xmm8
   1281 	movaps	0x50(%rsp),%xmm9
   1282 	movaps	0x60(%rsp),%xmm10
   1283 	movaps	0x70(%rsp),%xmm11
   1284 	movaps	0x80(%rsp),%xmm12
   1285 	movaps	0x90(%rsp),%xmm13
   1286 	movaps	0xa0(%rsp),%xmm14
   1287 	movaps	0xb0(%rsp),%xmm15
   1288 	lea	0xc8(%rsp),%rsp
   1289 .Lctr32_ret:
   1290 ___
   1291 $code.=<<___;
   1292 	ret
   1293 .size	aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
   1294 ___
   1295 }
   1296 
   1298 ######################################################################
   1299 # void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
   1300 #	const AES_KEY *key1, const AES_KEY *key2
   1301 #	const unsigned char iv[16]);
   1302 #
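# Consecutive tweaks are obtained by multiplying the current tweak by x
# in GF(2^128), i.e. in sketch form
#
#	carry = tweak >> 127
#	tweak = (tweak << 1) ^ (carry ? 0x87 : 0)
#
# which the pcmpgtd/pshufd/pand/.Lxts_magic sequences below implement
# without a 128-bit shift, propagating the carry between the two
# 64-bit halves of the tweak.
#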
   1303 {
   1304 my @tweak=map("%xmm$_",(10..15));
   1305 my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
   1306 my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
   1307 my $frame_size = 0x68 + ($win64?160:0);
   1308 
   1309 $code.=<<___;
   1310 .globl	aesni_xts_encrypt
   1311 .type	aesni_xts_encrypt,\@function,6
   1312 .align	16
   1313 aesni_xts_encrypt:
   1314 	lea	-$frame_size(%rsp),%rsp
   1315 ___
   1316 $code.=<<___ if ($win64);
   1317 	movaps	%xmm6,0x60(%rsp)
   1318 	movaps	%xmm7,0x70(%rsp)
   1319 	movaps	%xmm8,0x80(%rsp)
   1320 	movaps	%xmm9,0x90(%rsp)
   1321 	movaps	%xmm10,0xa0(%rsp)
   1322 	movaps	%xmm11,0xb0(%rsp)
   1323 	movaps	%xmm12,0xc0(%rsp)
   1324 	movaps	%xmm13,0xd0(%rsp)
   1325 	movaps	%xmm14,0xe0(%rsp)
   1326 	movaps	%xmm15,0xf0(%rsp)
   1327 .Lxts_enc_body:
   1328 ___
   1329 $code.=<<___;
   1330 	movups	($ivp),@tweak[5]		# load clear-text tweak
   1331 	mov	240(%r8),$rounds		# key2->rounds
   1332 	mov	240($key),$rnds_		# key1->rounds
   1333 ___
   1334 	# generate the tweak
   1335 	&aesni_generate1("enc",$key2,$rounds,@tweak[5]);
   1336 $code.=<<___;
   1337 	mov	$key,$key_			# backup $key
   1338 	mov	$rnds_,$rounds			# backup $rounds
   1339 	mov	$len,$len_			# backup $len
   1340 	and	\$-16,$len
   1341 
   1342 	movdqa	.Lxts_magic(%rip),$twmask
   1343 	pxor	$twtmp,$twtmp
   1344 	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1345 ___
   1346     for ($i=0;$i<4;$i++) {
   1347     $code.=<<___;
   1348 	pshufd	\$0x13,$twtmp,$twres
   1349 	pxor	$twtmp,$twtmp
   1350 	movdqa	@tweak[5],@tweak[$i]
   1351 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1352 	pand	$twmask,$twres			# isolate carry and residue
	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1354 	pxor	$twres,@tweak[5]
   1355 ___
   1356     }
   1357 $code.=<<___;
   1358 	sub	\$16*6,$len
   1359 	jc	.Lxts_enc_short
   1360 
   1361 	shr	\$1,$rounds
   1362 	sub	\$1,$rounds
   1363 	mov	$rounds,$rnds_
   1364 	jmp	.Lxts_enc_grandloop
   1365 
   1366 .align	16
   1367 .Lxts_enc_grandloop:
   1368 	pshufd	\$0x13,$twtmp,$twres
   1369 	movdqa	@tweak[5],@tweak[4]
   1370 	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
   1371 	movdqu	`16*0`($inp),$inout0		# load input
   1372 	pand	$twmask,$twres			# isolate carry and residue
   1373 	movdqu	`16*1`($inp),$inout1
   1374 	pxor	$twres,@tweak[5]
   1375 
   1376 	movdqu	`16*2`($inp),$inout2
   1377 	pxor	@tweak[0],$inout0		# input^=tweak
   1378 	movdqu	`16*3`($inp),$inout3
   1379 	pxor	@tweak[1],$inout1
   1380 	movdqu	`16*4`($inp),$inout4
   1381 	pxor	@tweak[2],$inout2
   1382 	movdqu	`16*5`($inp),$inout5
   1383 	lea	`16*6`($inp),$inp
   1384 	pxor	@tweak[3],$inout3
   1385 	$movkey		($key_),$rndkey0
   1386 	pxor	@tweak[4],$inout4
   1387 	pxor	@tweak[5],$inout5
   1388 
   1389 	# inline _aesni_encrypt6 and interleave first and last rounds
   1390 	# with own code...
   1391 	$movkey		16($key_),$rndkey1
   1392 	pxor		$rndkey0,$inout0
   1393 	pxor		$rndkey0,$inout1
   1394 	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks
   1395 	aesenc		$rndkey1,$inout0
   1396 	lea		32($key_),$key
   1397 	pxor		$rndkey0,$inout2
   1398 	 movdqa	@tweak[1],`16*1`(%rsp)
   1399 	aesenc		$rndkey1,$inout1
   1400 	pxor		$rndkey0,$inout3
   1401 	 movdqa	@tweak[2],`16*2`(%rsp)
   1402 	aesenc		$rndkey1,$inout2
   1403 	pxor		$rndkey0,$inout4
   1404 	 movdqa	@tweak[3],`16*3`(%rsp)
   1405 	aesenc		$rndkey1,$inout3
   1406 	pxor		$rndkey0,$inout5
   1407 	$movkey		($key),$rndkey0
   1408 	dec		$rounds
   1409 	 movdqa	@tweak[4],`16*4`(%rsp)
   1410 	aesenc		$rndkey1,$inout4
   1411 	 movdqa	@tweak[5],`16*5`(%rsp)
   1412 	aesenc		$rndkey1,$inout5
   1413 	pxor	$twtmp,$twtmp
   1414 	pcmpgtd	@tweak[5],$twtmp
   1415 	jmp		.Lxts_enc_loop6_enter
   1416 
   1417 .align	16
   1418 .Lxts_enc_loop6:
   1419 	aesenc		$rndkey1,$inout0
   1420 	aesenc		$rndkey1,$inout1
   1421 	dec		$rounds
   1422 	aesenc		$rndkey1,$inout2
   1423 	aesenc		$rndkey1,$inout3
   1424 	aesenc		$rndkey1,$inout4
   1425 	aesenc		$rndkey1,$inout5
   1426 .Lxts_enc_loop6_enter:
   1427 	$movkey		16($key),$rndkey1
   1428 	aesenc		$rndkey0,$inout0
   1429 	aesenc		$rndkey0,$inout1
   1430 	lea		32($key),$key
   1431 	aesenc		$rndkey0,$inout2
   1432 	aesenc		$rndkey0,$inout3
   1433 	aesenc		$rndkey0,$inout4
   1434 	aesenc		$rndkey0,$inout5
   1435 	$movkey		($key),$rndkey0
   1436 	jnz		.Lxts_enc_loop6
   1437 
   1438 	pshufd	\$0x13,$twtmp,$twres
   1439 	pxor	$twtmp,$twtmp
   1440 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1441 	 aesenc		$rndkey1,$inout0
   1442 	pand	$twmask,$twres			# isolate carry and residue
   1443 	 aesenc		$rndkey1,$inout1
   1444 	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1445 	 aesenc		$rndkey1,$inout2
   1446 	pxor	$twres,@tweak[5]
   1447 	 aesenc		$rndkey1,$inout3
   1448 	 aesenc		$rndkey1,$inout4
   1449 	 aesenc		$rndkey1,$inout5
   1450 	 $movkey	16($key),$rndkey1
   1451 
   1452 	pshufd	\$0x13,$twtmp,$twres
   1453 	pxor	$twtmp,$twtmp
   1454 	movdqa	@tweak[5],@tweak[0]
   1455 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1456 	 aesenc		$rndkey0,$inout0
   1457 	pand	$twmask,$twres			# isolate carry and residue
   1458 	 aesenc		$rndkey0,$inout1
	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1460 	 aesenc		$rndkey0,$inout2
   1461 	pxor	$twres,@tweak[5]
   1462 	 aesenc		$rndkey0,$inout3
   1463 	 aesenc		$rndkey0,$inout4
   1464 	 aesenc		$rndkey0,$inout5
   1465 	 $movkey	32($key),$rndkey0
   1466 
   1467 	pshufd	\$0x13,$twtmp,$twres
   1468 	pxor	$twtmp,$twtmp
   1469 	movdqa	@tweak[5],@tweak[1]
   1470 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1471 	 aesenc		$rndkey1,$inout0
   1472 	pand	$twmask,$twres			# isolate carry and residue
   1473 	 aesenc		$rndkey1,$inout1
	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1475 	 aesenc		$rndkey1,$inout2
   1476 	pxor	$twres,@tweak[5]
   1477 	 aesenc		$rndkey1,$inout3
   1478 	 aesenc		$rndkey1,$inout4
   1479 	 aesenc		$rndkey1,$inout5
   1480 
   1481 	pshufd	\$0x13,$twtmp,$twres
   1482 	pxor	$twtmp,$twtmp
   1483 	movdqa	@tweak[5],@tweak[2]
   1484 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1485 	 aesenclast	$rndkey0,$inout0
   1486 	pand	$twmask,$twres			# isolate carry and residue
   1487 	 aesenclast	$rndkey0,$inout1
	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1489 	 aesenclast	$rndkey0,$inout2
   1490 	pxor	$twres,@tweak[5]
   1491 	 aesenclast	$rndkey0,$inout3
   1492 	 aesenclast	$rndkey0,$inout4
   1493 	 aesenclast	$rndkey0,$inout5
   1494 
   1495 	pshufd	\$0x13,$twtmp,$twres
   1496 	pxor	$twtmp,$twtmp
   1497 	movdqa	@tweak[5],@tweak[3]
   1498 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1499 	 xorps	`16*0`(%rsp),$inout0		# output^=tweak
   1500 	pand	$twmask,$twres			# isolate carry and residue
   1501 	 xorps	`16*1`(%rsp),$inout1
	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1503 	pxor	$twres,@tweak[5]
   1504 
   1505 	xorps	`16*2`(%rsp),$inout2
   1506 	movups	$inout0,`16*0`($out)		# write output
   1507 	xorps	`16*3`(%rsp),$inout3
   1508 	movups	$inout1,`16*1`($out)
   1509 	xorps	`16*4`(%rsp),$inout4
   1510 	movups	$inout2,`16*2`($out)
   1511 	xorps	`16*5`(%rsp),$inout5
   1512 	movups	$inout3,`16*3`($out)
   1513 	mov	$rnds_,$rounds			# restore $rounds
   1514 	movups	$inout4,`16*4`($out)
   1515 	movups	$inout5,`16*5`($out)
   1516 	lea	`16*6`($out),$out
   1517 	sub	\$16*6,$len
   1518 	jnc	.Lxts_enc_grandloop
   1519 
   1520 	lea	3($rounds,$rounds),$rounds	# restore original value
   1521 	mov	$key_,$key			# restore $key
   1522 	mov	$rounds,$rnds_			# backup $rounds
   1523 
   1524 .Lxts_enc_short:
   1525 	add	\$16*6,$len
   1526 	jz	.Lxts_enc_done
   1527 
   1528 	cmp	\$0x20,$len
   1529 	jb	.Lxts_enc_one
   1530 	je	.Lxts_enc_two
   1531 
   1532 	cmp	\$0x40,$len
   1533 	jb	.Lxts_enc_three
   1534 	je	.Lxts_enc_four
   1535 
   1536 	pshufd	\$0x13,$twtmp,$twres
   1537 	movdqa	@tweak[5],@tweak[4]
   1538 	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
   1539 	 movdqu	($inp),$inout0
   1540 	pand	$twmask,$twres			# isolate carry and residue
   1541 	 movdqu	16*1($inp),$inout1
   1542 	pxor	$twres,@tweak[5]
   1543 
   1544 	movdqu	16*2($inp),$inout2
   1545 	pxor	@tweak[0],$inout0
   1546 	movdqu	16*3($inp),$inout3
   1547 	pxor	@tweak[1],$inout1
   1548 	movdqu	16*4($inp),$inout4
   1549 	lea	16*5($inp),$inp
   1550 	pxor	@tweak[2],$inout2
   1551 	pxor	@tweak[3],$inout3
   1552 	pxor	@tweak[4],$inout4
   1553 
   1554 	call	_aesni_encrypt6
   1555 
   1556 	xorps	@tweak[0],$inout0
   1557 	movdqa	@tweak[5],@tweak[0]
   1558 	xorps	@tweak[1],$inout1
   1559 	xorps	@tweak[2],$inout2
   1560 	movdqu	$inout0,($out)
   1561 	xorps	@tweak[3],$inout3
   1562 	movdqu	$inout1,16*1($out)
   1563 	xorps	@tweak[4],$inout4
   1564 	movdqu	$inout2,16*2($out)
   1565 	movdqu	$inout3,16*3($out)
   1566 	movdqu	$inout4,16*4($out)
   1567 	lea	16*5($out),$out
   1568 	jmp	.Lxts_enc_done
   1569 
   1570 .align	16
   1571 .Lxts_enc_one:
   1572 	movups	($inp),$inout0
   1573 	lea	16*1($inp),$inp
   1574 	xorps	@tweak[0],$inout0
   1575 ___
   1576 	&aesni_generate1("enc",$key,$rounds);
   1577 $code.=<<___;
   1578 	xorps	@tweak[0],$inout0
   1579 	movdqa	@tweak[1],@tweak[0]
   1580 	movups	$inout0,($out)
   1581 	lea	16*1($out),$out
   1582 	jmp	.Lxts_enc_done
   1583 
   1584 .align	16
   1585 .Lxts_enc_two:
   1586 	movups	($inp),$inout0
   1587 	movups	16($inp),$inout1
   1588 	lea	32($inp),$inp
   1589 	xorps	@tweak[0],$inout0
   1590 	xorps	@tweak[1],$inout1
   1591 
   1592 	call	_aesni_encrypt3
   1593 
   1594 	xorps	@tweak[0],$inout0
   1595 	movdqa	@tweak[2],@tweak[0]
   1596 	xorps	@tweak[1],$inout1
   1597 	movups	$inout0,($out)
   1598 	movups	$inout1,16*1($out)
   1599 	lea	16*2($out),$out
   1600 	jmp	.Lxts_enc_done
   1601 
   1602 .align	16
   1603 .Lxts_enc_three:
   1604 	movups	($inp),$inout0
   1605 	movups	16*1($inp),$inout1
   1606 	movups	16*2($inp),$inout2
   1607 	lea	16*3($inp),$inp
   1608 	xorps	@tweak[0],$inout0
   1609 	xorps	@tweak[1],$inout1
   1610 	xorps	@tweak[2],$inout2
   1611 
   1612 	call	_aesni_encrypt3
   1613 
   1614 	xorps	@tweak[0],$inout0
   1615 	movdqa	@tweak[3],@tweak[0]
   1616 	xorps	@tweak[1],$inout1
   1617 	xorps	@tweak[2],$inout2
   1618 	movups	$inout0,($out)
   1619 	movups	$inout1,16*1($out)
   1620 	movups	$inout2,16*2($out)
   1621 	lea	16*3($out),$out
   1622 	jmp	.Lxts_enc_done
   1623 
   1624 .align	16
   1625 .Lxts_enc_four:
   1626 	movups	($inp),$inout0
   1627 	movups	16*1($inp),$inout1
   1628 	movups	16*2($inp),$inout2
   1629 	xorps	@tweak[0],$inout0
   1630 	movups	16*3($inp),$inout3
   1631 	lea	16*4($inp),$inp
   1632 	xorps	@tweak[1],$inout1
   1633 	xorps	@tweak[2],$inout2
   1634 	xorps	@tweak[3],$inout3
   1635 
   1636 	call	_aesni_encrypt4
   1637 
   1638 	xorps	@tweak[0],$inout0
   1639 	movdqa	@tweak[5],@tweak[0]
   1640 	xorps	@tweak[1],$inout1
   1641 	xorps	@tweak[2],$inout2
   1642 	movups	$inout0,($out)
   1643 	xorps	@tweak[3],$inout3
   1644 	movups	$inout1,16*1($out)
   1645 	movups	$inout2,16*2($out)
   1646 	movups	$inout3,16*3($out)
   1647 	lea	16*4($out),$out
   1648 	jmp	.Lxts_enc_done
   1649 
   1650 .align	16
   1651 .Lxts_enc_done:
   1652 	and	\$15,$len_
   1653 	jz	.Lxts_enc_ret
   1654 	mov	$len_,$len
   1655 
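	# Ciphertext stealing for the trailing partial block: the last full
	# block has already been encrypted and written at -16($out).  The
	# loop below grafts the remaining $len_ plaintext bytes into that
	# block while moving the displaced ciphertext bytes up to ($out) as
	# the short final block; the merged block is then re-encrypted with
	# the last tweak and stored back at -16($out).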
   1656 .Lxts_enc_steal:
   1657 	movzb	($inp),%eax			# borrow $rounds ...
   1658 	movzb	-16($out),%ecx			# ... and $key
   1659 	lea	1($inp),$inp
   1660 	mov	%al,-16($out)
   1661 	mov	%cl,0($out)
   1662 	lea	1($out),$out
   1663 	sub	\$1,$len
   1664 	jnz	.Lxts_enc_steal
   1665 
   1666 	sub	$len_,$out			# rewind $out
   1667 	mov	$key_,$key			# restore $key
   1668 	mov	$rnds_,$rounds			# restore $rounds
   1669 
   1670 	movups	-16($out),$inout0
   1671 	xorps	@tweak[0],$inout0
   1672 ___
   1673 	&aesni_generate1("enc",$key,$rounds);
   1674 $code.=<<___;
   1675 	xorps	@tweak[0],$inout0
   1676 	movups	$inout0,-16($out)
   1677 
   1678 .Lxts_enc_ret:
   1679 ___
   1680 $code.=<<___ if ($win64);
   1681 	movaps	0x60(%rsp),%xmm6
   1682 	movaps	0x70(%rsp),%xmm7
   1683 	movaps	0x80(%rsp),%xmm8
   1684 	movaps	0x90(%rsp),%xmm9
   1685 	movaps	0xa0(%rsp),%xmm10
   1686 	movaps	0xb0(%rsp),%xmm11
   1687 	movaps	0xc0(%rsp),%xmm12
   1688 	movaps	0xd0(%rsp),%xmm13
   1689 	movaps	0xe0(%rsp),%xmm14
   1690 	movaps	0xf0(%rsp),%xmm15
   1691 ___
   1692 $code.=<<___;
   1693 	lea	$frame_size(%rsp),%rsp
   1694 .Lxts_enc_epilogue:
   1695 	ret
   1696 .size	aesni_xts_encrypt,.-aesni_xts_encrypt
   1697 ___
   1698 
   1699 $code.=<<___;
   1700 .globl	aesni_xts_decrypt
   1701 .type	aesni_xts_decrypt,\@function,6
   1702 .align	16
   1703 aesni_xts_decrypt:
   1704 	lea	-$frame_size(%rsp),%rsp
   1705 ___
   1706 $code.=<<___ if ($win64);
   1707 	movaps	%xmm6,0x60(%rsp)
   1708 	movaps	%xmm7,0x70(%rsp)
   1709 	movaps	%xmm8,0x80(%rsp)
   1710 	movaps	%xmm9,0x90(%rsp)
   1711 	movaps	%xmm10,0xa0(%rsp)
   1712 	movaps	%xmm11,0xb0(%rsp)
   1713 	movaps	%xmm12,0xc0(%rsp)
   1714 	movaps	%xmm13,0xd0(%rsp)
   1715 	movaps	%xmm14,0xe0(%rsp)
   1716 	movaps	%xmm15,0xf0(%rsp)
   1717 .Lxts_dec_body:
   1718 ___
   1719 $code.=<<___;
   1720 	movups	($ivp),@tweak[5]		# load clear-text tweak
   1721 	mov	240($key2),$rounds		# key2->rounds
   1722 	mov	240($key),$rnds_		# key1->rounds
   1723 ___
   1724 	# generate the tweak
   1725 	&aesni_generate1("enc",$key2,$rounds,@tweak[5]);
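	# (The initial tweak is simply the 16-byte sector value at ($ivp)
	# encrypted in ECB mode under the secondary key $key2, as specified
	# by IEEE P1619; every subsequent block's tweak is derived from it
	# by doubling in GF(2^128), see below.)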
   1726 $code.=<<___;
   1727 	xor	%eax,%eax			# if ($len%16) len-=16;
   1728 	test	\$15,$len
   1729 	setnz	%al
   1730 	shl	\$4,%rax
   1731 	sub	%rax,$len
   1732 
   1733 	mov	$key,$key_			# backup $key
   1734 	mov	$rnds_,$rounds			# backup $rounds
   1735 	mov	$len,$len_			# backup $len
   1736 	and	\$-16,$len
   1737 
   1738 	movdqa	.Lxts_magic(%rip),$twmask
   1739 	pxor	$twtmp,$twtmp
   1740 	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1741 ___
   1742     for ($i=0;$i<4;$i++) {
   1743     $code.=<<___;
   1744 	pshufd	\$0x13,$twtmp,$twres
   1745 	pxor	$twtmp,$twtmp
   1746 	movdqa	@tweak[5],@tweak[$i]
   1747 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1748 	pand	$twmask,$twres			# isolate carry and residue
   1749 	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1750 	pxor	$twres,@tweak[5]
   1751 ___
   1752     }
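# The loop above (and the matching snippets inside the grandloop) advances
# the running tweak by one block position, i.e. multiplies it by x in
# GF(2^128) with the reducing polynomial x^128+x^7+x^2+x+1.  paddq doubles
# each 64-bit half, while the pshufd/pand/pxor sequence re-injects the two
# carries that paddq drops, using the 0x87 and 1 constants from
# .Lxts_magic.  A plain-Perl reference sketch of the same step, not used
# by the generated code and assuming a perl built with 64-bit integers
# (tweak given as low/high 64-bit halves, as it sits in memory on
# little-endian x86_64):
sub ref_xts_double_tweak {
	my ($lo,$hi) = @_;
	my $carry = ($hi >> 63) & 1;				# bit 127 shifted out
	$hi = (($hi & 0x7fffffffffffffff) << 1) | ($lo >> 63);	# carry bit 63 into bit 64
	$lo = (($lo & 0x7fffffffffffffff) << 1) ^ ($carry ? 0x87 : 0);	# reduce
	return ($lo,$hi);
}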
   1753 $code.=<<___;
   1754 	sub	\$16*6,$len
   1755 	jc	.Lxts_dec_short
   1756 
   1757 	shr	\$1,$rounds
   1758 	sub	\$1,$rounds
   1759 	mov	$rounds,$rnds_
   1760 	jmp	.Lxts_dec_grandloop
   1761 
   1762 .align	16
   1763 .Lxts_dec_grandloop:
   1764 	pshufd	\$0x13,$twtmp,$twres
   1765 	movdqa	@tweak[5],@tweak[4]
   1766 	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
   1767 	movdqu	`16*0`($inp),$inout0		# load input
   1768 	pand	$twmask,$twres			# isolate carry and residue
   1769 	movdqu	`16*1`($inp),$inout1
   1770 	pxor	$twres,@tweak[5]
   1771 
   1772 	movdqu	`16*2`($inp),$inout2
   1773 	pxor	@tweak[0],$inout0		# input^=tweak
   1774 	movdqu	`16*3`($inp),$inout3
   1775 	pxor	@tweak[1],$inout1
   1776 	movdqu	`16*4`($inp),$inout4
   1777 	pxor	@tweak[2],$inout2
   1778 	movdqu	`16*5`($inp),$inout5
   1779 	lea	`16*6`($inp),$inp
   1780 	pxor	@tweak[3],$inout3
   1781 	$movkey		($key_),$rndkey0
   1782 	pxor	@tweak[4],$inout4
   1783 	pxor	@tweak[5],$inout5
   1784 
   1785 	# inline _aesni_decrypt6 and interleave the first and last rounds
   1786 	# with our own code...
   1787 	$movkey		16($key_),$rndkey1
   1788 	pxor		$rndkey0,$inout0
   1789 	pxor		$rndkey0,$inout1
   1790 	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks
   1791 	aesdec		$rndkey1,$inout0
   1792 	lea		32($key_),$key
   1793 	pxor		$rndkey0,$inout2
   1794 	 movdqa	@tweak[1],`16*1`(%rsp)
   1795 	aesdec		$rndkey1,$inout1
   1796 	pxor		$rndkey0,$inout3
   1797 	 movdqa	@tweak[2],`16*2`(%rsp)
   1798 	aesdec		$rndkey1,$inout2
   1799 	pxor		$rndkey0,$inout4
   1800 	 movdqa	@tweak[3],`16*3`(%rsp)
   1801 	aesdec		$rndkey1,$inout3
   1802 	pxor		$rndkey0,$inout5
   1803 	$movkey		($key),$rndkey0
   1804 	dec		$rounds
   1805 	 movdqa	@tweak[4],`16*4`(%rsp)
   1806 	aesdec		$rndkey1,$inout4
   1807 	 movdqa	@tweak[5],`16*5`(%rsp)
   1808 	aesdec		$rndkey1,$inout5
   1809 	pxor	$twtmp,$twtmp
   1810 	pcmpgtd	@tweak[5],$twtmp
   1811 	jmp		.Lxts_dec_loop6_enter
   1812 
   1813 .align	16
   1814 .Lxts_dec_loop6:
   1815 	aesdec		$rndkey1,$inout0
   1816 	aesdec		$rndkey1,$inout1
   1817 	dec		$rounds
   1818 	aesdec		$rndkey1,$inout2
   1819 	aesdec		$rndkey1,$inout3
   1820 	aesdec		$rndkey1,$inout4
   1821 	aesdec		$rndkey1,$inout5
   1822 .Lxts_dec_loop6_enter:
   1823 	$movkey		16($key),$rndkey1
   1824 	aesdec		$rndkey0,$inout0
   1825 	aesdec		$rndkey0,$inout1
   1826 	lea		32($key),$key
   1827 	aesdec		$rndkey0,$inout2
   1828 	aesdec		$rndkey0,$inout3
   1829 	aesdec		$rndkey0,$inout4
   1830 	aesdec		$rndkey0,$inout5
   1831 	$movkey		($key),$rndkey0
   1832 	jnz		.Lxts_dec_loop6
   1833 
   1834 	pshufd	\$0x13,$twtmp,$twres
   1835 	pxor	$twtmp,$twtmp
   1836 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1837 	 aesdec		$rndkey1,$inout0
   1838 	pand	$twmask,$twres			# isolate carry and residue
   1839 	 aesdec		$rndkey1,$inout1
   1840 	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1841 	 aesdec		$rndkey1,$inout2
   1842 	pxor	$twres,@tweak[5]
   1843 	 aesdec		$rndkey1,$inout3
   1844 	 aesdec		$rndkey1,$inout4
   1845 	 aesdec		$rndkey1,$inout5
   1846 	 $movkey	16($key),$rndkey1
   1847 
   1848 	pshufd	\$0x13,$twtmp,$twres
   1849 	pxor	$twtmp,$twtmp
   1850 	movdqa	@tweak[5],@tweak[0]
   1851 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1852 	 aesdec		$rndkey0,$inout0
   1853 	pand	$twmask,$twres			# isolate carry and residue
   1854 	 aesdec		$rndkey0,$inout1
   1855 	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1856 	 aesdec		$rndkey0,$inout2
   1857 	pxor	$twres,@tweak[5]
   1858 	 aesdec		$rndkey0,$inout3
   1859 	 aesdec		$rndkey0,$inout4
   1860 	 aesdec		$rndkey0,$inout5
   1861 	 $movkey	32($key),$rndkey0
   1862 
   1863 	pshufd	\$0x13,$twtmp,$twres
   1864 	pxor	$twtmp,$twtmp
   1865 	movdqa	@tweak[5],@tweak[1]
   1866 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1867 	 aesdec		$rndkey1,$inout0
   1868 	pand	$twmask,$twres			# isolate carry and residue
   1869 	 aesdec		$rndkey1,$inout1
   1870 	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1871 	 aesdec		$rndkey1,$inout2
   1872 	pxor	$twres,@tweak[5]
   1873 	 aesdec		$rndkey1,$inout3
   1874 	 aesdec		$rndkey1,$inout4
   1875 	 aesdec		$rndkey1,$inout5
   1876 
   1877 	pshufd	\$0x13,$twtmp,$twres
   1878 	pxor	$twtmp,$twtmp
   1879 	movdqa	@tweak[5],@tweak[2]
   1880 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1881 	 aesdeclast	$rndkey0,$inout0
   1882 	pand	$twmask,$twres			# isolate carry and residue
   1883 	 aesdeclast	$rndkey0,$inout1
   1884 	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1885 	 aesdeclast	$rndkey0,$inout2
   1886 	pxor	$twres,@tweak[5]
   1887 	 aesdeclast	$rndkey0,$inout3
   1888 	 aesdeclast	$rndkey0,$inout4
   1889 	 aesdeclast	$rndkey0,$inout5
   1890 
   1891 	pshufd	\$0x13,$twtmp,$twres
   1892 	pxor	$twtmp,$twtmp
   1893 	movdqa	@tweak[5],@tweak[3]
   1894 	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
   1895 	 xorps	`16*0`(%rsp),$inout0		# output^=tweak
   1896 	pand	$twmask,$twres			# isolate carry and residue
   1897 	 xorps	`16*1`(%rsp),$inout1
   1898 	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
   1899 	pxor	$twres,@tweak[5]
   1900 
   1901 	xorps	`16*2`(%rsp),$inout2
   1902 	movups	$inout0,`16*0`($out)		# write output
   1903 	xorps	`16*3`(%rsp),$inout3
   1904 	movups	$inout1,`16*1`($out)
   1905 	xorps	`16*4`(%rsp),$inout4
   1906 	movups	$inout2,`16*2`($out)
   1907 	xorps	`16*5`(%rsp),$inout5
   1908 	movups	$inout3,`16*3`($out)
   1909 	mov	$rnds_,$rounds			# restore $rounds
   1910 	movups	$inout4,`16*4`($out)
   1911 	movups	$inout5,`16*5`($out)
   1912 	lea	`16*6`($out),$out
   1913 	sub	\$16*6,$len
   1914 	jnc	.Lxts_dec_grandloop
   1915 
   1916 	lea	3($rounds,$rounds),$rounds	# restore original value
   1917 	mov	$key_,$key			# restore $key
   1918 	mov	$rounds,$rnds_			# backup $rounds
   1919 
   1920 .Lxts_dec_short:
   1921 	add	\$16*6,$len
   1922 	jz	.Lxts_dec_done
   1923 
   1924 	cmp	\$0x20,$len
   1925 	jb	.Lxts_dec_one
   1926 	je	.Lxts_dec_two
   1927 
   1928 	cmp	\$0x40,$len
   1929 	jb	.Lxts_dec_three
   1930 	je	.Lxts_dec_four
   1931 
   1932 	pshufd	\$0x13,$twtmp,$twres
   1933 	movdqa	@tweak[5],@tweak[4]
   1934 	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
   1935 	 movdqu	($inp),$inout0
   1936 	pand	$twmask,$twres			# isolate carry and residue
   1937 	 movdqu	16*1($inp),$inout1
   1938 	pxor	$twres,@tweak[5]
   1939 
   1940 	movdqu	16*2($inp),$inout2
   1941 	pxor	@tweak[0],$inout0
   1942 	movdqu	16*3($inp),$inout3
   1943 	pxor	@tweak[1],$inout1
   1944 	movdqu	16*4($inp),$inout4
   1945 	lea	16*5($inp),$inp
   1946 	pxor	@tweak[2],$inout2
   1947 	pxor	@tweak[3],$inout3
   1948 	pxor	@tweak[4],$inout4
   1949 
   1950 	call	_aesni_decrypt6
   1951 
   1952 	xorps	@tweak[0],$inout0
   1953 	xorps	@tweak[1],$inout1
   1954 	xorps	@tweak[2],$inout2
   1955 	movdqu	$inout0,($out)
   1956 	xorps	@tweak[3],$inout3
   1957 	movdqu	$inout1,16*1($out)
   1958 	xorps	@tweak[4],$inout4
   1959 	movdqu	$inout2,16*2($out)
   1960 	 pxor		$twtmp,$twtmp
   1961 	movdqu	$inout3,16*3($out)
   1962 	 pcmpgtd	@tweak[5],$twtmp
   1963 	movdqu	$inout4,16*4($out)
   1964 	lea	16*5($out),$out
   1965 	 pshufd		\$0x13,$twtmp,@tweak[1]	# $twres
   1966 	and	\$15,$len_
   1967 	jz	.Lxts_dec_ret
   1968 
   1969 	movdqa	@tweak[5],@tweak[0]
   1970 	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
   1971 	pand	$twmask,@tweak[1]		# isolate carry and residue
   1972 	pxor	@tweak[5],@tweak[1]
   1973 	jmp	.Lxts_dec_done2
   1974 
   1975 .align	16
   1976 .Lxts_dec_one:
   1977 	movups	($inp),$inout0
   1978 	lea	16*1($inp),$inp
   1979 	xorps	@tweak[0],$inout0
   1980 ___
   1981 	&aesni_generate1("dec",$key,$rounds);
   1982 $code.=<<___;
   1983 	xorps	@tweak[0],$inout0
   1984 	movdqa	@tweak[1],@tweak[0]
   1985 	movups	$inout0,($out)
   1986 	movdqa	@tweak[2],@tweak[1]
   1987 	lea	16*1($out),$out
   1988 	jmp	.Lxts_dec_done
   1989 
   1990 .align	16
   1991 .Lxts_dec_two:
   1992 	movups	($inp),$inout0
   1993 	movups	16($inp),$inout1
   1994 	lea	32($inp),$inp
   1995 	xorps	@tweak[0],$inout0
   1996 	xorps	@tweak[1],$inout1
   1997 
   1998 	call	_aesni_decrypt3
   1999 
   2000 	xorps	@tweak[0],$inout0
   2001 	movdqa	@tweak[2],@tweak[0]
   2002 	xorps	@tweak[1],$inout1
   2003 	movdqa	@tweak[3],@tweak[1]
   2004 	movups	$inout0,($out)
   2005 	movups	$inout1,16*1($out)
   2006 	lea	16*2($out),$out
   2007 	jmp	.Lxts_dec_done
   2008 
   2009 .align	16
   2010 .Lxts_dec_three:
   2011 	movups	($inp),$inout0
   2012 	movups	16*1($inp),$inout1
   2013 	movups	16*2($inp),$inout2
   2014 	lea	16*3($inp),$inp
   2015 	xorps	@tweak[0],$inout0
   2016 	xorps	@tweak[1],$inout1
   2017 	xorps	@tweak[2],$inout2
   2018 
   2019 	call	_aesni_decrypt3
   2020 
   2021 	xorps	@tweak[0],$inout0
   2022 	movdqa	@tweak[3],@tweak[0]
   2023 	xorps	@tweak[1],$inout1
   2024 	movdqa	@tweak[5],@tweak[1]
   2025 	xorps	@tweak[2],$inout2
   2026 	movups	$inout0,($out)
   2027 	movups	$inout1,16*1($out)
   2028 	movups	$inout2,16*2($out)
   2029 	lea	16*3($out),$out
   2030 	jmp	.Lxts_dec_done
   2031 
   2032 .align	16
   2033 .Lxts_dec_four:
   2034 	pshufd	\$0x13,$twtmp,$twres
   2035 	movdqa	@tweak[5],@tweak[4]
   2036 	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
   2037 	 movups	($inp),$inout0
   2038 	pand	$twmask,$twres			# isolate carry and residue
   2039 	 movups	16*1($inp),$inout1
   2040 	pxor	$twres,@tweak[5]
   2041 
   2042 	movups	16*2($inp),$inout2
   2043 	xorps	@tweak[0],$inout0
   2044 	movups	16*3($inp),$inout3
   2045 	lea	16*4($inp),$inp
   2046 	xorps	@tweak[1],$inout1
   2047 	xorps	@tweak[2],$inout2
   2048 	xorps	@tweak[3],$inout3
   2049 
   2050 	call	_aesni_decrypt4
   2051 
   2052 	xorps	@tweak[0],$inout0
   2053 	movdqa	@tweak[4],@tweak[0]
   2054 	xorps	@tweak[1],$inout1
   2055 	movdqa	@tweak[5],@tweak[1]
   2056 	xorps	@tweak[2],$inout2
   2057 	movups	$inout0,($out)
   2058 	xorps	@tweak[3],$inout3
   2059 	movups	$inout1,16*1($out)
   2060 	movups	$inout2,16*2($out)
   2061 	movups	$inout3,16*3($out)
   2062 	lea	16*4($out),$out
   2063 	jmp	.Lxts_dec_done
   2064 
   2065 .align	16
   2066 .Lxts_dec_done:
   2067 	and	\$15,$len_
   2068 	jz	.Lxts_dec_ret
   2069 .Lxts_dec_done2:
   2070 	mov	$len_,$len
   2071 	mov	$key_,$key			# restore $key
   2072 	mov	$rnds_,$rounds			# restore $rounds
   2073 
   2074 	movups	($inp),$inout0
   2075 	xorps	@tweak[1],$inout0
   2076 ___
   2077 	&aesni_generate1("dec",$key,$rounds);
   2078 $code.=<<___;
   2079 	xorps	@tweak[1],$inout0
   2080 	movups	$inout0,($out)
   2081 
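	# Ciphertext stealing, decrypt side: the block just stored at ($out)
	# is the last full ciphertext block decrypted with the *next* tweak,
	# @tweak[1].  The loop below moves the trailing ciphertext bytes from
	# 16($inp) into the front of that block and shifts its displaced
	# bytes to 16($out), where they form the short final plaintext; the
	# reassembled block at ($out) is then decrypted with @tweak[0].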
   2082 .Lxts_dec_steal:
   2083 	movzb	16($inp),%eax			# borrow $rounds ...
   2084 	movzb	($out),%ecx			# ... and $key
   2085 	lea	1($inp),$inp
   2086 	mov	%al,($out)
   2087 	mov	%cl,16($out)
   2088 	lea	1($out),$out
   2089 	sub	\$1,$len
   2090 	jnz	.Lxts_dec_steal
   2091 
   2092 	sub	$len_,$out			# rewind $out
   2093 	mov	$key_,$key			# restore $key
   2094 	mov	$rnds_,$rounds			# restore $rounds
   2095 
   2096 	movups	($out),$inout0
   2097 	xorps	@tweak[0],$inout0
   2098 ___
   2099 	&aesni_generate1("dec",$key,$rounds);
   2100 $code.=<<___;
   2101 	xorps	@tweak[0],$inout0
   2102 	movups	$inout0,($out)
   2103 
   2104 .Lxts_dec_ret:
   2105 ___
   2106 $code.=<<___ if ($win64);
   2107 	movaps	0x60(%rsp),%xmm6
   2108 	movaps	0x70(%rsp),%xmm7
   2109 	movaps	0x80(%rsp),%xmm8
   2110 	movaps	0x90(%rsp),%xmm9
   2111 	movaps	0xa0(%rsp),%xmm10
   2112 	movaps	0xb0(%rsp),%xmm11
   2113 	movaps	0xc0(%rsp),%xmm12
   2114 	movaps	0xd0(%rsp),%xmm13
   2115 	movaps	0xe0(%rsp),%xmm14
   2116 	movaps	0xf0(%rsp),%xmm15
   2117 ___
   2118 $code.=<<___;
   2119 	lea	$frame_size(%rsp),%rsp
   2120 .Lxts_dec_epilogue:
   2121 	ret
   2122 .size	aesni_xts_decrypt,.-aesni_xts_decrypt
   2123 ___
   2124 } }}
   2125 
   2127 ########################################################################
   2128 # void $PREFIX_cbc_encrypt (const void *inp, void *out,
   2129 #			    size_t length, const AES_KEY *key,
   2130 #			    unsigned char *ivp,const int enc);
   2131 {
   2132 my $reserved = $win64?0x40:-0x18;	# used in decrypt
   2133 $code.=<<___;
   2134 .globl	${PREFIX}_cbc_encrypt
   2135 .type	${PREFIX}_cbc_encrypt,\@function,6
   2136 .align	16
   2137 ${PREFIX}_cbc_encrypt:
   2138 	test	$len,$len		# check length
   2139 	jz	.Lcbc_ret
   2140 
   2141 	mov	240($key),$rnds_	# key->rounds
   2142 	mov	$key,$key_		# backup $key
   2143 	test	%r9d,%r9d		# 6th argument
   2144 	jz	.Lcbc_decrypt
   2145 #--------------------------- CBC ENCRYPT ------------------------------#
   2146 	movups	($ivp),$inout0		# load iv as initial state
   2147 	mov	$rnds_,$rounds
   2148 	cmp	\$16,$len
   2149 	jb	.Lcbc_enc_tail
   2150 	sub	\$16,$len
   2151 	jmp	.Lcbc_enc_loop
   2152 .align	16
   2153 .Lcbc_enc_loop:
   2154 	movups	($inp),$inout1		# load input
   2155 	lea	16($inp),$inp
   2156 	#xorps	$inout1,$inout0
   2157 ___
   2158 	&aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
   2159 $code.=<<___;
   2160 	mov	$rnds_,$rounds		# restore $rounds
   2161 	mov	$key_,$key		# restore $key
   2162 	movups	$inout0,0($out)		# store output
   2163 	lea	16($out),$out
   2164 	sub	\$16,$len
   2165 	jnc	.Lcbc_enc_loop
   2166 	add	\$16,$len
   2167 	jnz	.Lcbc_enc_tail
   2168 	movups	$inout0,($ivp)
   2169 	jmp	.Lcbc_ret
   2170 
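	# Partial final block: copy the remaining 1-15 input bytes into the
	# output buffer, zero-pad that block to 16 bytes in place, then point
	# both $inp and $out at it and take one more pass through
	# .Lcbc_enc_loop so it is encrypted in place.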
   2171 .Lcbc_enc_tail:
   2172 	mov	$len,%rcx	# zaps $key
   2173 	xchg	$inp,$out	# $inp is %rsi and $out is %rdi now
   2174 	.long	0x9066A4F3	# rep movsb
   2175 	mov	\$16,%ecx	# zero tail
   2176 	sub	$len,%rcx
   2177 	xor	%eax,%eax
   2178 	.long	0x9066AAF3	# rep stosb
   2179 	lea	-16(%rdi),%rdi	# rewind $out by 1 block
   2180 	mov	$rnds_,$rounds	# restore $rounds
   2181 	mov	%rdi,%rsi	# $inp and $out are the same
   2182 	mov	$key_,$key	# restore $key
   2183 	xor	$len,$len	# len=16
   2184 	jmp	.Lcbc_enc_loop	# one more spin
   2185 #--------------------------- CBC DECRYPT ------------------------------#
   2187 .align	16
   2188 .Lcbc_decrypt:
   2189 ___
   2190 $code.=<<___ if ($win64);
   2191 	lea	-0x58(%rsp),%rsp
   2192 	movaps	%xmm6,(%rsp)
   2193 	movaps	%xmm7,0x10(%rsp)
   2194 	movaps	%xmm8,0x20(%rsp)
   2195 	movaps	%xmm9,0x30(%rsp)
   2196 .Lcbc_decrypt_body:
   2197 ___
   2198 $code.=<<___;
   2199 	movups	($ivp),$iv
   2200 	mov	$rnds_,$rounds
   2201 	cmp	\$0x70,$len
   2202 	jbe	.Lcbc_dec_tail
   2203 	shr	\$1,$rnds_
   2204 	sub	\$0x70,$len
   2205 	mov	$rnds_,$rounds
   2206 	movaps	$iv,$reserved(%rsp)
   2207 	jmp	.Lcbc_dec_loop8_enter
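	# CBC decryption parallelizes: each plaintext block is simply
	# Decrypt(C[i]) xor C[i-1], so eight ciphertext blocks are pushed
	# through the AES rounds together.  Afterwards each result is xored
	# with the preceding ciphertext block re-loaded from ($inp), the very
	# first one with the saved IV at $reserved(%rsp), and the batch's
	# last ciphertext block becomes the IV for the next iteration.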
   2208 .align	16
   2209 .Lcbc_dec_loop8:
   2210 	movaps	$rndkey0,$reserved(%rsp)	# save IV
   2211 	movups	$inout7,($out)
   2212 	lea	0x10($out),$out
   2213 .Lcbc_dec_loop8_enter:
   2214 	$movkey		($key),$rndkey0
   2215 	movups	($inp),$inout0			# load input
   2216 	movups	0x10($inp),$inout1
   2217 	$movkey		16($key),$rndkey1
   2218 
   2219 	lea		32($key),$key
   2220 	movdqu	0x20($inp),$inout2
   2221 	xorps		$rndkey0,$inout0
   2222 	movdqu	0x30($inp),$inout3
   2223 	xorps		$rndkey0,$inout1
   2224 	movdqu	0x40($inp),$inout4
   2225 	aesdec		$rndkey1,$inout0
   2226 	pxor		$rndkey0,$inout2
   2227 	movdqu	0x50($inp),$inout5
   2228 	aesdec		$rndkey1,$inout1
   2229 	pxor		$rndkey0,$inout3
   2230 	movdqu	0x60($inp),$inout6
   2231 	aesdec		$rndkey1,$inout2
   2232 	pxor		$rndkey0,$inout4
   2233 	movdqu	0x70($inp),$inout7
   2234 	aesdec		$rndkey1,$inout3
   2235 	pxor		$rndkey0,$inout5
   2236 	dec		$rounds
   2237 	aesdec		$rndkey1,$inout4
   2238 	pxor		$rndkey0,$inout6
   2239 	aesdec		$rndkey1,$inout5
   2240 	pxor		$rndkey0,$inout7
   2241 	$movkey		($key),$rndkey0
   2242 	aesdec		$rndkey1,$inout6
   2243 	aesdec		$rndkey1,$inout7
   2244 	$movkey		16($key),$rndkey1
   2245 
   2246 	call		.Ldec_loop8_enter
   2247 
   2248 	movups	($inp),$rndkey1		# re-load input
   2249 	movups	0x10($inp),$rndkey0
   2250 	xorps	$reserved(%rsp),$inout0	# ^= IV
   2251 	xorps	$rndkey1,$inout1
   2252 	movups	0x20($inp),$rndkey1
   2253 	xorps	$rndkey0,$inout2
   2254 	movups	0x30($inp),$rndkey0
   2255 	xorps	$rndkey1,$inout3
   2256 	movups	0x40($inp),$rndkey1
   2257 	xorps	$rndkey0,$inout4
   2258 	movups	0x50($inp),$rndkey0
   2259 	xorps	$rndkey1,$inout5
   2260 	movups	0x60($inp),$rndkey1
   2261 	xorps	$rndkey0,$inout6
   2262 	movups	0x70($inp),$rndkey0	# IV
   2263 	xorps	$rndkey1,$inout7
   2264 	movups	$inout0,($out)
   2265 	movups	$inout1,0x10($out)
   2266 	movups	$inout2,0x20($out)
   2267 	movups	$inout3,0x30($out)
   2268 	mov	$rnds_,$rounds		# restore $rounds
   2269 	movups	$inout4,0x40($out)
   2270 	mov	$key_,$key		# restore $key
   2271 	movups	$inout5,0x50($out)
   2272 	lea	0x80($inp),$inp
   2273 	movups	$inout6,0x60($out)
   2274 	lea	0x70($out),$out
   2275 	sub	\$0x80,$len
   2276 	ja	.Lcbc_dec_loop8
   2277 
   2278 	movaps	$inout7,$inout0
   2279 	movaps	$rndkey0,$iv
   2280 	add	\$0x70,$len
   2281 	jle	.Lcbc_dec_tail_collected
   2282 	movups	$inout0,($out)
   2283 	lea	1($rnds_,$rnds_),$rounds
   2284 	lea	0x10($out),$out
   2285 .Lcbc_dec_tail:
   2286 	movups	($inp),$inout0
   2287 	movaps	$inout0,$in0
   2288 	cmp	\$0x10,$len
   2289 	jbe	.Lcbc_dec_one
   2290 
   2291 	movups	0x10($inp),$inout1
   2292 	movaps	$inout1,$in1
   2293 	cmp	\$0x20,$len
   2294 	jbe	.Lcbc_dec_two
   2295 
   2296 	movups	0x20($inp),$inout2
   2297 	movaps	$inout2,$in2
   2298 	cmp	\$0x30,$len
   2299 	jbe	.Lcbc_dec_three
   2300 
   2301 	movups	0x30($inp),$inout3
   2302 	cmp	\$0x40,$len
   2303 	jbe	.Lcbc_dec_four
   2304 
   2305 	movups	0x40($inp),$inout4
   2306 	cmp	\$0x50,$len
   2307 	jbe	.Lcbc_dec_five
   2308 
   2309 	movups	0x50($inp),$inout5
   2310 	cmp	\$0x60,$len
   2311 	jbe	.Lcbc_dec_six
   2312 
   2313 	movups	0x60($inp),$inout6
   2314 	movaps	$iv,$reserved(%rsp)	# save IV
   2315 	call	_aesni_decrypt8
   2316 	movups	($inp),$rndkey1
   2317 	movups	0x10($inp),$rndkey0
   2318 	xorps	$reserved(%rsp),$inout0	# ^= IV
   2319 	xorps	$rndkey1,$inout1
   2320 	movups	0x20($inp),$rndkey1
   2321 	xorps	$rndkey0,$inout2
   2322 	movups	0x30($inp),$rndkey0
   2323 	xorps	$rndkey1,$inout3
   2324 	movups	0x40($inp),$rndkey1
   2325 	xorps	$rndkey0,$inout4
   2326 	movups	0x50($inp),$rndkey0
   2327 	xorps	$rndkey1,$inout5
   2328 	movups	0x60($inp),$iv		# IV
   2329 	xorps	$rndkey0,$inout6
   2330 	movups	$inout0,($out)
   2331 	movups	$inout1,0x10($out)
   2332 	movups	$inout2,0x20($out)
   2333 	movups	$inout3,0x30($out)
   2334 	movups	$inout4,0x40($out)
   2335 	movups	$inout5,0x50($out)
   2336 	lea	0x60($out),$out
   2337 	movaps	$inout6,$inout0
   2338 	sub	\$0x70,$len
   2339 	jmp	.Lcbc_dec_tail_collected
   2340 .align	16
   2341 .Lcbc_dec_one:
   2342 ___
   2343 	&aesni_generate1("dec",$key,$rounds);
   2344 $code.=<<___;
   2345 	xorps	$iv,$inout0
   2346 	movaps	$in0,$iv
   2347 	sub	\$0x10,$len
   2348 	jmp	.Lcbc_dec_tail_collected
   2349 .align	16
   2350 .Lcbc_dec_two:
   2351 	xorps	$inout2,$inout2
   2352 	call	_aesni_decrypt3
   2353 	xorps	$iv,$inout0
   2354 	xorps	$in0,$inout1
   2355 	movups	$inout0,($out)
   2356 	movaps	$in1,$iv
   2357 	movaps	$inout1,$inout0
   2358 	lea	0x10($out),$out
   2359 	sub	\$0x20,$len
   2360 	jmp	.Lcbc_dec_tail_collected
   2361 .align	16
   2362 .Lcbc_dec_three:
   2363 	call	_aesni_decrypt3
   2364 	xorps	$iv,$inout0
   2365 	xorps	$in0,$inout1
   2366 	movups	$inout0,($out)
   2367 	xorps	$in1,$inout2
   2368 	movups	$inout1,0x10($out)
   2369 	movaps	$in2,$iv
   2370 	movaps	$inout2,$inout0
   2371 	lea	0x20($out),$out
   2372 	sub	\$0x30,$len
   2373 	jmp	.Lcbc_dec_tail_collected
   2374 .align	16
   2375 .Lcbc_dec_four:
   2376 	call	_aesni_decrypt4
   2377 	xorps	$iv,$inout0
   2378 	movups	0x30($inp),$iv
   2379 	xorps	$in0,$inout1
   2380 	movups	$inout0,($out)
   2381 	xorps	$in1,$inout2
   2382 	movups	$inout1,0x10($out)
   2383 	xorps	$in2,$inout3
   2384 	movups	$inout2,0x20($out)
   2385 	movaps	$inout3,$inout0
   2386 	lea	0x30($out),$out
   2387 	sub	\$0x40,$len
   2388 	jmp	.Lcbc_dec_tail_collected
   2389 .align	16
   2390 .Lcbc_dec_five:
   2391 	xorps	$inout5,$inout5
   2392 	call	_aesni_decrypt6
   2393 	movups	0x10($inp),$rndkey1
   2394 	movups	0x20($inp),$rndkey0
   2395 	xorps	$iv,$inout0
   2396 	xorps	$in0,$inout1
   2397 	xorps	$rndkey1,$inout2
   2398 	movups	0x30($inp),$rndkey1
   2399 	xorps	$rndkey0,$inout3
   2400 	movups	0x40($inp),$iv
   2401 	xorps	$rndkey1,$inout4
   2402 	movups	$inout0,($out)
   2403 	movups	$inout1,0x10($out)
   2404 	movups	$inout2,0x20($out)
   2405 	movups	$inout3,0x30($out)
   2406 	lea	0x40($out),$out
   2407 	movaps	$inout4,$inout0
   2408 	sub	\$0x50,$len
   2409 	jmp	.Lcbc_dec_tail_collected
   2410 .align	16
   2411 .Lcbc_dec_six:
   2412 	call	_aesni_decrypt6
   2413 	movups	0x10($inp),$rndkey1
   2414 	movups	0x20($inp),$rndkey0
   2415 	xorps	$iv,$inout0
   2416 	xorps	$in0,$inout1
   2417 	xorps	$rndkey1,$inout2
   2418 	movups	0x30($inp),$rndkey1
   2419 	xorps	$rndkey0,$inout3
   2420 	movups	0x40($inp),$rndkey0
   2421 	xorps	$rndkey1,$inout4
   2422 	movups	0x50($inp),$iv
   2423 	xorps	$rndkey0,$inout5
   2424 	movups	$inout0,($out)
   2425 	movups	$inout1,0x10($out)
   2426 	movups	$inout2,0x20($out)
   2427 	movups	$inout3,0x30($out)
   2428 	movups	$inout4,0x40($out)
   2429 	lea	0x50($out),$out
   2430 	movaps	$inout5,$inout0
   2431 	sub	\$0x60,$len
   2432 	jmp	.Lcbc_dec_tail_collected
   2433 .align	16
   2434 .Lcbc_dec_tail_collected:
   2435 	and	\$15,$len
   2436 	movups	$iv,($ivp)
   2437 	jnz	.Lcbc_dec_tail_partial
   2438 	movups	$inout0,($out)
   2439 	jmp	.Lcbc_dec_ret
   2440 .align	16
   2441 .Lcbc_dec_tail_partial:
   2442 	movaps	$inout0,$reserved(%rsp)
   2443 	mov	\$16,%rcx
   2444 	mov	$out,%rdi
   2445 	sub	$len,%rcx
   2446 	lea	$reserved(%rsp),%rsi
   2447 	.long	0x9066A4F3	# rep movsb
   2448 
   2449 .Lcbc_dec_ret:
   2450 ___
   2451 $code.=<<___ if ($win64);
   2452 	movaps	(%rsp),%xmm6
   2453 	movaps	0x10(%rsp),%xmm7
   2454 	movaps	0x20(%rsp),%xmm8
   2455 	movaps	0x30(%rsp),%xmm9
   2456 	lea	0x58(%rsp),%rsp
   2457 ___
   2458 $code.=<<___;
   2459 .Lcbc_ret:
   2460 	ret
   2461 .size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
   2462 ___
   2463 } 
   2465 # int $PREFIX_set_[en|de]crypt_key (const unsigned char *userKey,
   2466 #				int bits, AES_KEY *key)
   2467 { my ($inp,$bits,$key) = @_4args;
   2468   $bits =~ s/%r/%e/;
   2469 
   2470 $code.=<<___;
   2471 .globl	${PREFIX}_set_decrypt_key
   2472 .type	${PREFIX}_set_decrypt_key,\@abi-omnipotent
   2473 .align	16
   2474 ${PREFIX}_set_decrypt_key:
   2475 	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
   2476 	call	__aesni_set_encrypt_key
   2477 	shl	\$4,$bits		# rounds-1 after _aesni_set_encrypt_key
   2478 	test	%eax,%eax
   2479 	jnz	.Ldec_key_ret
   2480 	lea	16($key,$bits),$inp	# points at the end of key schedule
   2481 
   2482 	$movkey	($key),%xmm0		# just swap
   2483 	$movkey	($inp),%xmm1
   2484 	$movkey	%xmm0,($inp)
   2485 	$movkey	%xmm1,($key)
   2486 	lea	16($key),$key
   2487 	lea	-16($inp),$inp
   2488 
   2489 .Ldec_key_inverse:
   2490 	$movkey	($key),%xmm0		# swap and inverse
   2491 	$movkey	($inp),%xmm1
   2492 	aesimc	%xmm0,%xmm0
   2493 	aesimc	%xmm1,%xmm1
   2494 	lea	16($key),$key
   2495 	lea	-16($inp),$inp
   2496 	$movkey	%xmm0,16($inp)
   2497 	$movkey	%xmm1,-16($key)
   2498 	cmp	$key,$inp
   2499 	ja	.Ldec_key_inverse
   2500 
   2501 	$movkey	($key),%xmm0		# inverse middle
   2502 	aesimc	%xmm0,%xmm0
   2503 	$movkey	%xmm0,($inp)
   2504 .Ldec_key_ret:
   2505 	add	\$8,%rsp
   2506 	ret
   2507 .LSEH_end_set_decrypt_key:
   2508 .size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
   2509 ___
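# The decryption key schedule built above is the AES "equivalent inverse
# cipher" schedule: the encryption round keys in reverse order, with
# InvMixColumns (aesimc) applied to every round key except the first and
# the last.  In pseudocode (aesimc() standing for the InvMixColumns
# transform, not an actual Perl helper):
#
#	@rk = reverse @rk;				# swap ends toward the middle
#	$_ = aesimc($_) for @rk[1 .. $#rk-1];		# skip the two outer keys
#
# which is exactly what the swap loop, .Ldec_key_inverse and the final
# "inverse middle" step implement in place.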
   2510 
   2512 # This is based on a submission by
   2513 #
   2514 #	Huang Ying <ying.huang (at] intel.com>
   2515 #	Vinodh Gopal <vinodh.gopal (at] intel.com>
   2516 #	Kahraman Akdemir
   2517 #
   2518 # Aggressively optimized with respect to aeskeygenassist's critical path;
   2519 # register usage is contained to %xmm0-5 to meet the Win64 ABI requirement.
   2520 #
   2521 $code.=<<___;
   2522 .globl	${PREFIX}_set_encrypt_key
   2523 .type	${PREFIX}_set_encrypt_key,\@abi-omnipotent
   2524 .align	16
   2525 ${PREFIX}_set_encrypt_key:
   2526 __aesni_set_encrypt_key:
   2527 	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
   2528 	mov	\$-1,%rax
   2529 	test	$inp,$inp
   2530 	jz	.Lenc_key_ret
   2531 	test	$key,$key
   2532 	jz	.Lenc_key_ret
   2533 
   2534 	movups	($inp),%xmm0		# pull first 128 bits of *userKey
   2535 	xorps	%xmm4,%xmm4		# low dword of xmm4 is assumed 0
   2536 	lea	16($key),%rax
   2537 	cmp	\$256,$bits
   2538 	je	.L14rounds
   2539 	cmp	\$192,$bits
   2540 	je	.L12rounds
   2541 	cmp	\$128,$bits
   2542 	jne	.Lbad_keybits
   2543 
   2544 .L10rounds:
   2545 	mov	\$9,$bits			# 10 rounds for 128-bit key
   2546 	$movkey	%xmm0,($key)			# round 0
   2547 	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 1
   2548 	call		.Lkey_expansion_128_cold
   2549 	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 2
   2550 	call		.Lkey_expansion_128
   2551 	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 3
   2552 	call		.Lkey_expansion_128
   2553 	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 4
   2554 	call		.Lkey_expansion_128
   2555 	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 5
   2556 	call		.Lkey_expansion_128
   2557 	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 6
   2558 	call		.Lkey_expansion_128
   2559 	aeskeygenassist	\$0x40,%xmm0,%xmm1	# round 7
   2560 	call		.Lkey_expansion_128
   2561 	aeskeygenassist	\$0x80,%xmm0,%xmm1	# round 8
   2562 	call		.Lkey_expansion_128
   2563 	aeskeygenassist	\$0x1b,%xmm0,%xmm1	# round 9
   2564 	call		.Lkey_expansion_128
   2565 	aeskeygenassist	\$0x36,%xmm0,%xmm1	# round 10
   2566 	call		.Lkey_expansion_128
   2567 	$movkey	%xmm0,(%rax)
   2568 	mov	$bits,80(%rax)	# 240(%rdx)
   2569 	xor	%eax,%eax
   2570 	jmp	.Lenc_key_ret
   2571 
   2572 .align	16
   2573 .L12rounds:
   2574 	movq	16($inp),%xmm2			# remaining 1/3 of *userKey
   2575 	mov	\$11,$bits			# 12 rounds for 192
   2576 	$movkey	%xmm0,($key)			# round 0
   2577 	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 1,2
   2578 	call		.Lkey_expansion_192a_cold
   2579 	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 2,3
   2580 	call		.Lkey_expansion_192b
   2581 	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 4,5
   2582 	call		.Lkey_expansion_192a
   2583 	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 5,6
   2584 	call		.Lkey_expansion_192b
   2585 	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 7,8
   2586 	call		.Lkey_expansion_192a
   2587 	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 8,9
   2588 	call		.Lkey_expansion_192b
   2589 	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 10,11
   2590 	call		.Lkey_expansion_192a
   2591 	aeskeygenassist	\$0x80,%xmm2,%xmm1	# round 11,12
   2592 	call		.Lkey_expansion_192b
   2593 	$movkey	%xmm0,(%rax)
   2594 	mov	$bits,48(%rax)	# 240(%rdx)
   2595 	xor	%rax,%rax
   2596 	jmp	.Lenc_key_ret
   2597 
   2598 .align	16
   2599 .L14rounds:
   2600 	movups	16($inp),%xmm2			# remaining half of *userKey
   2601 	mov	\$13,$bits			# 14 rounds for 256
   2602 	lea	16(%rax),%rax
   2603 	$movkey	%xmm0,($key)			# round 0
   2604 	$movkey	%xmm2,16($key)			# round 1
   2605 	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 2
   2606 	call		.Lkey_expansion_256a_cold
   2607 	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 3
   2608 	call		.Lkey_expansion_256b
   2609 	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 4
   2610 	call		.Lkey_expansion_256a
   2611 	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 5
   2612 	call		.Lkey_expansion_256b
   2613 	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 6
   2614 	call		.Lkey_expansion_256a
   2615 	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 7
   2616 	call		.Lkey_expansion_256b
   2617 	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 8
   2618 	call		.Lkey_expansion_256a
   2619 	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 9
   2620 	call		.Lkey_expansion_256b
   2621 	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 10
   2622 	call		.Lkey_expansion_256a
   2623 	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 11
   2624 	call		.Lkey_expansion_256b
   2625 	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 12
   2626 	call		.Lkey_expansion_256a
   2627 	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 13
   2628 	call		.Lkey_expansion_256b
   2629 	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 14
   2630 	call		.Lkey_expansion_256a
   2631 	$movkey	%xmm0,(%rax)
   2632 	mov	$bits,16(%rax)	# 240(%rdx)
   2633 	xor	%rax,%rax
   2634 	jmp	.Lenc_key_ret
   2635 
   2636 .align	16
   2637 .Lbad_keybits:
   2638 	mov	\$-2,%rax
   2639 .Lenc_key_ret:
   2640 	add	\$8,%rsp
   2641 	ret
   2642 .LSEH_end_set_encrypt_key:
   2643 
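# The .Lkey_expansion_* helpers below implement the FIPS-197 key schedule
# around aeskeygenassist, which leaves SubWord(X1), RotWord(SubWord(X1))^rcon,
# SubWord(X3), RotWord(SubWord(X3))^rcon in dwords 0-3 of its destination
# (X1/X3 being dwords 1 and 3 of its source).  The shufps \$0b11111111 /
# pshufd \$0b01010101 / shufps \$0b10101010 on the critical path broadcast
# whichever of those dwords the current step needs, while the
# shufps \$0b00010000 / shufps \$0b10001100 + xorps pairs build the running
# prefix-xor of the previous round key's words, so each new word comes out
# as w[i] = w[i-4] ^ w[i-1], with the SubWord/RotWord/rcon transform
# applied on the round boundary.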
   2645 .align	16
   2646 .Lkey_expansion_128:
   2647 	$movkey	%xmm0,(%rax)
   2648 	lea	16(%rax),%rax
   2649 .Lkey_expansion_128_cold:
   2650 	shufps	\$0b00010000,%xmm0,%xmm4
   2651 	xorps	%xmm4, %xmm0
   2652 	shufps	\$0b10001100,%xmm0,%xmm4
   2653 	xorps	%xmm4, %xmm0
   2654 	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
   2655 	xorps	%xmm1,%xmm0
   2656 	ret
   2657 
   2658 .align 16
   2659 .Lkey_expansion_192a:
   2660 	$movkey	%xmm0,(%rax)
   2661 	lea	16(%rax),%rax
   2662 .Lkey_expansion_192a_cold:
   2663 	movaps	%xmm2, %xmm5
   2664 .Lkey_expansion_192b_warm:
   2665 	shufps	\$0b00010000,%xmm0,%xmm4
   2666 	movdqa	%xmm2,%xmm3
   2667 	xorps	%xmm4,%xmm0
   2668 	shufps	\$0b10001100,%xmm0,%xmm4
   2669 	pslldq	\$4,%xmm3
   2670 	xorps	%xmm4,%xmm0
   2671 	pshufd	\$0b01010101,%xmm1,%xmm1	# critical path
   2672 	pxor	%xmm3,%xmm2
   2673 	pxor	%xmm1,%xmm0
   2674 	pshufd	\$0b11111111,%xmm0,%xmm3
   2675 	pxor	%xmm3,%xmm2
   2676 	ret
   2677 
   2678 .align 16
   2679 .Lkey_expansion_192b:
   2680 	movaps	%xmm0,%xmm3
   2681 	shufps	\$0b01000100,%xmm0,%xmm5
   2682 	$movkey	%xmm5,(%rax)
   2683 	shufps	\$0b01001110,%xmm2,%xmm3
   2684 	$movkey	%xmm3,16(%rax)
   2685 	lea	32(%rax),%rax
   2686 	jmp	.Lkey_expansion_192b_warm
   2687 
   2688 .align	16
   2689 .Lkey_expansion_256a:
   2690 	$movkey	%xmm2,(%rax)
   2691 	lea	16(%rax),%rax
   2692 .Lkey_expansion_256a_cold:
   2693 	shufps	\$0b00010000,%xmm0,%xmm4
   2694 	xorps	%xmm4,%xmm0
   2695 	shufps	\$0b10001100,%xmm0,%xmm4
   2696 	xorps	%xmm4,%xmm0
   2697 	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
   2698 	xorps	%xmm1,%xmm0
   2699 	ret
   2700 
   2701 .align 16
   2702 .Lkey_expansion_256b:
   2703 	$movkey	%xmm0,(%rax)
   2704 	lea	16(%rax),%rax
   2705 
   2706 	shufps	\$0b00010000,%xmm2,%xmm4
   2707 	xorps	%xmm4,%xmm2
   2708 	shufps	\$0b10001100,%xmm2,%xmm4
   2709 	xorps	%xmm4,%xmm2
   2710 	shufps	\$0b10101010,%xmm1,%xmm1	# critical path
   2711 	xorps	%xmm1,%xmm2
   2712 	ret
   2713 .size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
   2714 .size	__aesni_set_encrypt_key,.-__aesni_set_encrypt_key
   2715 ___
   2716 }
   2717 
   2719 $code.=<<___;
   2720 .align	64
   2721 .Lbswap_mask:
   2722 	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
   2723 .Lincrement32:
   2724 	.long	6,6,6,0
   2725 .Lincrement64:
   2726 	.long	1,0,0,0
   2727 .Lxts_magic:
   2728 	.long	0x87,0,1,0
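# The dword layout of .Lxts_magic lets a single pand pick both fix-ups for
# the tweak doubling: lane 0 supplies the 0x87 reduction constant (applied
# when bit 127 carries out) and lane 2 supplies the 1 carried from bit 63
# into bit 64, the two carries that the per-qword paddq drops.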
   2729 
   2730 .asciz  "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
   2731 .align	64
   2732 ___
   2733 
   2734 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
   2735 #		CONTEXT *context,DISPATCHER_CONTEXT *disp)
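# The handlers below run during Win64 stack unwinding.  Most of them copy
# the non-volatile %xmm register images that the corresponding prologue
# parked on the stack back into the CONTEXT record and step Rsp past the
# frame; all of them restore Rsi/Rdi and finish in .Lcommon_seh_tail,
# which calls RtlVirtualUnwind to continue the unwind into the caller.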
   2736 if ($win64) {
   2737 $rec="%rcx";
   2738 $frame="%rdx";
   2739 $context="%r8";
   2740 $disp="%r9";
   2741 
   2742 $code.=<<___;
   2743 .extern	__imp_RtlVirtualUnwind
   2744 ___
   2745 $code.=<<___ if ($PREFIX eq "aesni");
   2746 .type	ecb_se_handler,\@abi-omnipotent
   2747 .align	16
   2748 ecb_se_handler:
   2749 	push	%rsi
   2750 	push	%rdi
   2751 	push	%rbx
   2752 	push	%rbp
   2753 	push	%r12
   2754 	push	%r13
   2755 	push	%r14
   2756 	push	%r15
   2757 	pushfq
   2758 	sub	\$64,%rsp
   2759 
   2760 	mov	152($context),%rax	# pull context->Rsp
   2761 
   2762 	jmp	.Lcommon_seh_tail
   2763 .size	ecb_se_handler,.-ecb_se_handler
   2764 
   2765 .type	ccm64_se_handler,\@abi-omnipotent
   2766 .align	16
   2767 ccm64_se_handler:
   2768 	push	%rsi
   2769 	push	%rdi
   2770 	push	%rbx
   2771 	push	%rbp
   2772 	push	%r12
   2773 	push	%r13
   2774 	push	%r14
   2775 	push	%r15
   2776 	pushfq
   2777 	sub	\$64,%rsp
   2778 
   2779 	mov	120($context),%rax	# pull context->Rax
   2780 	mov	248($context),%rbx	# pull context->Rip
   2781 
   2782 	mov	8($disp),%rsi		# disp->ImageBase
   2783 	mov	56($disp),%r11		# disp->HandlerData
   2784 
   2785 	mov	0(%r11),%r10d		# HandlerData[0]
   2786 	lea	(%rsi,%r10),%r10	# prologue label
   2787 	cmp	%r10,%rbx		# context->Rip<prologue label
   2788 	jb	.Lcommon_seh_tail
   2789 
   2790 	mov	152($context),%rax	# pull context->Rsp
   2791 
   2792 	mov	4(%r11),%r10d		# HandlerData[1]
   2793 	lea	(%rsi,%r10),%r10	# epilogue label
   2794 	cmp	%r10,%rbx		# context->Rip>=epilogue label
   2795 	jae	.Lcommon_seh_tail
   2796 
   2797 	lea	0(%rax),%rsi		# %xmm save area
   2798 	lea	512($context),%rdi	# &context.Xmm6
   2799 	mov	\$8,%ecx		# 4*sizeof(%xmm0)/sizeof(%rax)
   2800 	.long	0xa548f3fc		# cld; rep movsq
   2801 	lea	0x58(%rax),%rax		# adjust stack pointer
   2802 
   2803 	jmp	.Lcommon_seh_tail
   2804 .size	ccm64_se_handler,.-ccm64_se_handler
   2805 
   2806 .type	ctr32_se_handler,\@abi-omnipotent
   2807 .align	16
   2808 ctr32_se_handler:
   2809 	push	%rsi
   2810 	push	%rdi
   2811 	push	%rbx
   2812 	push	%rbp
   2813 	push	%r12
   2814 	push	%r13
   2815 	push	%r14
   2816 	push	%r15
   2817 	pushfq
   2818 	sub	\$64,%rsp
   2819 
   2820 	mov	120($context),%rax	# pull context->Rax
   2821 	mov	248($context),%rbx	# pull context->Rip
   2822 
   2823 	lea	.Lctr32_body(%rip),%r10
   2824 	cmp	%r10,%rbx		# context->Rip<"prologue" label
   2825 	jb	.Lcommon_seh_tail
   2826 
   2827 	mov	152($context),%rax	# pull context->Rsp
   2828 
   2829 	lea	.Lctr32_ret(%rip),%r10
   2830 	cmp	%r10,%rbx
   2831 	jae	.Lcommon_seh_tail
   2832 
   2833 	lea	0x20(%rax),%rsi		# %xmm save area
   2834 	lea	512($context),%rdi	# &context.Xmm6
   2835 	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
   2836 	.long	0xa548f3fc		# cld; rep movsq
   2837 	lea	0xc8(%rax),%rax		# adjust stack pointer
   2838 
   2839 	jmp	.Lcommon_seh_tail
   2840 .size	ctr32_se_handler,.-ctr32_se_handler
   2841 
   2842 .type	xts_se_handler,\@abi-omnipotent
   2843 .align	16
   2844 xts_se_handler:
   2845 	push	%rsi
   2846 	push	%rdi
   2847 	push	%rbx
   2848 	push	%rbp
   2849 	push	%r12
   2850 	push	%r13
   2851 	push	%r14
   2852 	push	%r15
   2853 	pushfq
   2854 	sub	\$64,%rsp
   2855 
   2856 	mov	120($context),%rax	# pull context->Rax
   2857 	mov	248($context),%rbx	# pull context->Rip
   2858 
   2859 	mov	8($disp),%rsi		# disp->ImageBase
   2860 	mov	56($disp),%r11		# disp->HandlerData
   2861 
   2862 	mov	0(%r11),%r10d		# HandlerData[0]
   2863 	lea	(%rsi,%r10),%r10	# prologue label
   2864 	cmp	%r10,%rbx		# context->Rip<prologue label
   2865 	jb	.Lcommon_seh_tail
   2866 
   2867 	mov	152($context),%rax	# pull context->Rsp
   2868 
   2869 	mov	4(%r11),%r10d		# HandlerData[1]
   2870 	lea	(%rsi,%r10),%r10	# epilogue label
   2871 	cmp	%r10,%rbx		# context->Rip>=epilogue label
   2872 	jae	.Lcommon_seh_tail
   2873 
   2874 	lea	0x60(%rax),%rsi		# %xmm save area
   2875 	lea	512($context),%rdi	# &context.Xmm6
   2876 	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
   2877 	.long	0xa548f3fc		# cld; rep movsq
   2878 	lea	0x68+160(%rax),%rax	# adjust stack pointer
   2879 
   2880 	jmp	.Lcommon_seh_tail
   2881 .size	xts_se_handler,.-xts_se_handler
   2882 ___
   2883 $code.=<<___;
   2884 .type	cbc_se_handler,\@abi-omnipotent
   2885 .align	16
   2886 cbc_se_handler:
   2887 	push	%rsi
   2888 	push	%rdi
   2889 	push	%rbx
   2890 	push	%rbp
   2891 	push	%r12
   2892 	push	%r13
   2893 	push	%r14
   2894 	push	%r15
   2895 	pushfq
   2896 	sub	\$64,%rsp
   2897 
   2898 	mov	152($context),%rax	# pull context->Rsp
   2899 	mov	248($context),%rbx	# pull context->Rip
   2900 
   2901 	lea	.Lcbc_decrypt(%rip),%r10
   2902 	cmp	%r10,%rbx		# context->Rip<"prologue" label
   2903 	jb	.Lcommon_seh_tail
   2904 
   2905 	lea	.Lcbc_decrypt_body(%rip),%r10
   2906 	cmp	%r10,%rbx		# context->Rip<cbc_decrypt_body
   2907 	jb	.Lrestore_cbc_rax
   2908 
   2909 	lea	.Lcbc_ret(%rip),%r10
   2910 	cmp	%r10,%rbx		# context->Rip>="epilogue" label
   2911 	jae	.Lcommon_seh_tail
   2912 
   2913 	lea	0(%rax),%rsi		# top of stack
   2914 	lea	512($context),%rdi	# &context.Xmm6
   2915 	mov	\$8,%ecx		# 4*sizeof(%xmm0)/sizeof(%rax)
   2916 	.long	0xa548f3fc		# cld; rep movsq
   2917 	lea	0x58(%rax),%rax		# adjust stack pointer
   2918 	jmp	.Lcommon_seh_tail
   2919 
   2920 .Lrestore_cbc_rax:
   2921 	mov	120($context),%rax
   2922 
   2923 .Lcommon_seh_tail:
   2924 	mov	8(%rax),%rdi
   2925 	mov	16(%rax),%rsi
   2926 	mov	%rax,152($context)	# restore context->Rsp
   2927 	mov	%rsi,168($context)	# restore context->Rsi
   2928 	mov	%rdi,176($context)	# restore context->Rdi
   2929 
   2930 	mov	40($disp),%rdi		# disp->ContextRecord
   2931 	mov	$context,%rsi		# context
   2932 	mov	\$154,%ecx		# sizeof(CONTEXT) in qwords
   2933 	.long	0xa548f3fc		# cld; rep movsq
   2934 
   2935 	mov	$disp,%rsi
   2936 	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
   2937 	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
   2938 	mov	0(%rsi),%r8		# arg3, disp->ControlPc
   2939 	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
   2940 	mov	40(%rsi),%r10		# disp->ContextRecord
   2941 	lea	56(%rsi),%r11		# &disp->HandlerData
   2942 	lea	24(%rsi),%r12		# &disp->EstablisherFrame
   2943 	mov	%r10,32(%rsp)		# arg5
   2944 	mov	%r11,40(%rsp)		# arg6
   2945 	mov	%r12,48(%rsp)		# arg7
   2946 	mov	%rcx,56(%rsp)		# arg8, (NULL)
   2947 	call	*__imp_RtlVirtualUnwind(%rip)
   2948 
   2949 	mov	\$1,%eax		# ExceptionContinueSearch
   2950 	add	\$64,%rsp
   2951 	popfq
   2952 	pop	%r15
   2953 	pop	%r14
   2954 	pop	%r13
   2955 	pop	%r12
   2956 	pop	%rbp
   2957 	pop	%rbx
   2958 	pop	%rdi
   2959 	pop	%rsi
   2960 	ret
   2961 .size	cbc_se_handler,.-cbc_se_handler
   2962 
   2963 .section	.pdata
   2964 .align	4
   2965 ___
   2966 $code.=<<___ if ($PREFIX eq "aesni");
   2967 	.rva	.LSEH_begin_aesni_ecb_encrypt
   2968 	.rva	.LSEH_end_aesni_ecb_encrypt
   2969 	.rva	.LSEH_info_ecb
   2970 
   2971 	.rva	.LSEH_begin_aesni_ccm64_encrypt_blocks
   2972 	.rva	.LSEH_end_aesni_ccm64_encrypt_blocks
   2973 	.rva	.LSEH_info_ccm64_enc
   2974 
   2975 	.rva	.LSEH_begin_aesni_ccm64_decrypt_blocks
   2976 	.rva	.LSEH_end_aesni_ccm64_decrypt_blocks
   2977 	.rva	.LSEH_info_ccm64_dec
   2978 
   2979 	.rva	.LSEH_begin_aesni_ctr32_encrypt_blocks
   2980 	.rva	.LSEH_end_aesni_ctr32_encrypt_blocks
   2981 	.rva	.LSEH_info_ctr32
   2982 
   2983 	.rva	.LSEH_begin_aesni_xts_encrypt
   2984 	.rva	.LSEH_end_aesni_xts_encrypt
   2985 	.rva	.LSEH_info_xts_enc
   2986 
   2987 	.rva	.LSEH_begin_aesni_xts_decrypt
   2988 	.rva	.LSEH_end_aesni_xts_decrypt
   2989 	.rva	.LSEH_info_xts_dec
   2990 ___
   2991 $code.=<<___;
   2992 	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
   2993 	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
   2994 	.rva	.LSEH_info_cbc
   2995 
   2996 	.rva	${PREFIX}_set_decrypt_key
   2997 	.rva	.LSEH_end_set_decrypt_key
   2998 	.rva	.LSEH_info_key
   2999 
   3000 	.rva	${PREFIX}_set_encrypt_key
   3001 	.rva	.LSEH_end_set_encrypt_key
   3002 	.rva	.LSEH_info_key
   3003 .section	.xdata
   3004 .align	8
   3005 ___
   3006 $code.=<<___ if ($PREFIX eq "aesni");
   3007 .LSEH_info_ecb:
   3008 	.byte	9,0,0,0
   3009 	.rva	ecb_se_handler
   3010 .LSEH_info_ccm64_enc:
   3011 	.byte	9,0,0,0
   3012 	.rva	ccm64_se_handler
   3013 	.rva	.Lccm64_enc_body,.Lccm64_enc_ret	# HandlerData[]
   3014 .LSEH_info_ccm64_dec:
   3015 	.byte	9,0,0,0
   3016 	.rva	ccm64_se_handler
   3017 	.rva	.Lccm64_dec_body,.Lccm64_dec_ret	# HandlerData[]
   3018 .LSEH_info_ctr32:
   3019 	.byte	9,0,0,0
   3020 	.rva	ctr32_se_handler
   3021 .LSEH_info_xts_enc:
   3022 	.byte	9,0,0,0
   3023 	.rva	xts_se_handler
   3024 	.rva	.Lxts_enc_body,.Lxts_enc_epilogue	# HandlerData[]
   3025 .LSEH_info_xts_dec:
   3026 	.byte	9,0,0,0
   3027 	.rva	xts_se_handler
   3028 	.rva	.Lxts_dec_body,.Lxts_dec_epilogue	# HandlerData[]
   3029 ___
   3030 $code.=<<___;
   3031 .LSEH_info_cbc:
   3032 	.byte	9,0,0,0
   3033 	.rva	cbc_se_handler
   3034 .LSEH_info_key:
   3035 	.byte	0x01,0x04,0x01,0x00
   3036 	.byte	0x04,0x02,0x00,0x00	# sub rsp,8
   3037 ___
   3038 }
   3039 
   3040 sub rex {
   3041   local *opcode=shift;
   3042   my ($dst,$src)=@_;
   3043   my $rex=0;
   3044 
   3045     $rex|=0x04			if($dst>=8);
   3046     $rex|=0x01			if($src>=8);
   3047     push @opcode,$rex|0x40	if($rex);
   3048 }
   3049 
   3050 sub aesni {
   3051   my $line=shift;
   3052   my @opcode=(0x66);
   3053 
   3054     if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
   3055 	rex(\@opcode,$4,$3);
   3056 	push @opcode,0x0f,0x3a,0xdf;
   3057 	push @opcode,0xc0|($3&7)|(($4&7)<<3);	# ModR/M
   3058 	my $c=$2;
   3059 	push @opcode,$c=~/^0/?oct($c):$c;
   3060 	return ".byte\t".join(',',@opcode);
   3061     }
   3062     elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
   3063 	my %opcodelet = (
   3064 		"aesimc" => 0xdb,
   3065 		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
   3066 		"aesdec" => 0xde,	"aesdeclast" => 0xdf
   3067 	);
   3068 	return undef if (!defined($opcodelet{$1}));
   3069 	rex(\@opcode,$3,$2);
   3070 	push @opcode,0x0f,0x38,$opcodelet{$1};
   3071 	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
   3072 	return ".byte\t".join(',',@opcode);
   3073     }
   3074     return $line;
   3075 }
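# The final substitution over $code hand-assembles every AES-NI mnemonic
# into a .byte sequence via aesni() above, so the module still builds with
# assemblers that predate AES-NI.  For example, "aesenc %xmm1,%xmm0"
# (opcode 66 0f 38 dc, ModR/M with reg=destination, r/m=source) is emitted
# as ".byte 0x66,0x0f,0x38,0xdc,0xc1".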
   3076 
   3077 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
   3078 $code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
   3079 
   3080 print $code;
   3081 
   3082 close STDOUT;
   3083