#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't account for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from the config
# line results in ~40% improvement (yes, even for the C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* a config
# line *without* RC4_CHAR! As for the coding "secret," I bet on partial
# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
# I simply 'inc %r8b'. Even though the optimization manual discourages
# operating on partial registers, it turned out to be the best bet.
# At least for AMD... How IA32E would perform remains to be seen...
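#
# To illustrate the trick (an informal sketch, not part of the
# generated code): with the upper bits of %r8 kept zero, the RC4
# index update i = (i + 1) & 255 can be coded either as
#
#	inc	%r8		# full-width increment...
#	and	$255,%r8	# ...with an explicit wrap
#
# or as the single
#
#	inc	%r8b		# byte increment, wraps at 256 for free
#
# at the cost of a partial-register dependency.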

# As was shown by Marc Bevand, reordering a couple of load operations
# results in an even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# The latter means that if you want to *estimate* what to expect from
# *your* Opteron, multiply 54 by 3.3 and by the clock frequency in GHz.
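# For example (a back-of-the-envelope illustration, not a measured
# result): a 2.2GHz Opteron would be expected to do roughly
# 54*3.3*2.2 ~= 392MBps.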

# The Intel P4 EM64T core was found to run the AMD64 code really
# slowly... The only way to achieve comparable performance on P4 was
# to keep RC4_CHAR. Kind of ironic, huh? As it's apparently impossible
# to compose blended code which would perform within even a 30% margin
# on both AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details...

# The P4 EM64T core appears to be "allergic" to 64-bit inc/dec.
# Replacing those with add/sub results in a 50% performance improvement
# of the folded loop... [presumably because inc/dec leave CF untouched,
# carrying a false dependency on earlier flag-setting instructions,
# while add/sub rewrite all flags outright]

# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
# performance by >30% [unlike the P4 32-bit case, that is], but only
# provided that loads are reordered even more aggressively! Both code
# paths, AMD64 and EM64T, reorder loads in essentially the same manner
# as my IA-64 implementation. On Opteron this resulted in a modest 5%
# improvement [I had to test it], while final Intel P4 performance now
# achieves a respectable 432MBps on a 2.8GHz processor. For reference:
# executed on a Xeon, the current RC4_CHAR code path is 2.7x faster
# than the RC4_INT code path, while executed on an Opteron it is only
# 25% slower than the RC4_INT one [meaning that if CPU architecture
# detection is not implemented, then this final RC4_CHAR code path
# should be preferred, as it provides better *all-round* performance].

# Intel Core2 was observed to perform poorly on both code paths:-( It
# apparently suffers from some kind of partial register stall, which
# occurs in 64-bit mode only [a virtually identical 32-bit loop was
# observed to outperform the 64-bit one by almost 50%]. Adding two
# movzb instructions to cloop1 boosts its performance by 80%! This
# loop appears to be an optimal fit for Core2, so the code was
# modified to skip cloop8 on this CPU.
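# [The movzb additions presumably help because movzb writes its whole
# destination register and thus breaks the partial-register dependency
# that a plain byte-sized mov would leave in place.]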

$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open STDOUT,"| $^X $xlate $output" or die "can't call $xlate: $!";
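# From here on everything printed to STDOUT is piped through the
# perlasm translator, which converts the AT&T-style code below into
# the assembler dialect appropriate for $output.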

$dat="%rdi";	    # arg1
$len="%rsi";	    # arg2
$inp="%rdx";	    # arg3
$out="%rcx";	    # arg4

@XX=("%r8","%r10");
@TX=("%r9","%r11");
$YY="%r12";
$TY="%r13";

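# The key-stream loops below implement the standard RC4 PRGA; as a
# reference sketch, in C terms (with S = $dat, i = $XX[0], j = $YY,
# tx = $TX[0], ty = $TY):
#
#	i = (i + 1) & 255;	tx = S[i];
#	j = (j + tx) & 255;	ty = S[j];
#	S[j] = tx; S[i] = ty;
#	*out++ = *in++ ^ S[(tx + ty) & 255];
#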
$code=<<___;
.text

.globl	RC4
.type	RC4,\@function,4
.align	16
RC4:	or	$len,$len
	jne	.Lentry
	ret
.Lentry:
	push	%r12
	push	%r13

	add	\$8,$dat
	movl	-8($dat),$XX[0]#d
	movl	-4($dat),$YY#d
	cmpl	\$-1,256($dat)
	je	.LRC4_CHAR
	inc	$XX[0]#b
	movl	($dat,$XX[0],4),$TX[0]#d
	test	\$-8,$len
	jz	.Lloop1
	jmp	.Lloop8
.align	16
.Lloop8:
___
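# The 8x-unrolled body: each iteration merges the previous key-stream
# byte into %rax with ror while the next S[i] load is already in
# flight, and @TX/@XX are "rotated" so that index [0] always refers to
# the current registers.  The cmp/cmove pair repairs $TX[1] in the
# aliasing case, where the new i equals j and the store to S[j] has
# just overwritten the slot that was loaded into $TX[1].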
for ($i=0;$i<8;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	mov	$XX[0],$XX[1]
	movl	($dat,$YY,4),$TY#d
	ror	\$8,%rax			# ror is redundant when $i=0
	inc	$XX[1]#b
	movl	($dat,$XX[1],4),$TX[1]#d
	cmp	$XX[1],$YY
	movl	$TX[0]#d,($dat,$YY,4)
	cmove	$TX[0],$TX[1]
	movl	$TY#d,($dat,$XX[0],4)
	add	$TX[0]#b,$TY#b
	movb	($dat,$TY,4),%al
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
$code.=<<___;
	ror	\$8,%rax
	sub	\$8,$len

	xor	($inp),%rax
	add	\$8,$inp
	mov	%rax,($out)
	add	\$8,$out

	test	\$-8,$len
	jnz	.Lloop8
	cmp	\$0,$len
	jne	.Lloop1
___
$code.=<<___;
.Lexit:
	sub	\$1,$XX[0]#b
	movl	$XX[0]#d,-8($dat)
	movl	$YY#d,-4($dat)

	pop	%r13
	pop	%r12
	ret
.align	16
.Lloop1:
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	$TY#d,($dat,$XX[0],4)
	add	$TY#b,$TX[0]#b
	inc	$XX[0]#b
	movl	($dat,$TX[0],4),$TY#d
	movl	($dat,$XX[0],4),$TX[0]#d
	xorb	($inp),$TY#b
	inc	$inp
	movb	$TY#b,($out)
	inc	$out
	dec	$len
	jnz	.Lloop1
	jmp	.Lexit

.align	16
.LRC4_CHAR:
	add	\$1,$XX[0]#b
	movzb	($dat,$XX[0]),$TX[0]#d
	test	\$-8,$len
	jz	.Lcloop1
	cmp	\$0,260($dat)
	jnz	.Lcloop1
	push	%rbx
	jmp	.Lcloop8
.align	16
.Lcloop8:
	mov	($inp),%eax
	mov	4($inp),%ebx
___
# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
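# Each step xors one key-stream byte into the low byte of the
# accumulator (%eax for the first four steps, %ebx for the last four)
# and then rotates it by 8, so after four steps all four bytes of the
# register have been processed.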
for ($i=0;$i<4;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i			# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%al
	ror	\$8,%eax
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
for ($i=4;$i<8;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i			# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%bl
	ror	\$8,%ebx
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
$code.=<<___;
	lea	-8($len),$len
	mov	%eax,($out)
	lea	8($inp),$inp
	mov	%ebx,4($out)
	lea	8($out),$out

	test	\$-8,$len
	jnz	.Lcloop8
	pop	%rbx
	cmp	\$0,$len
	jne	.Lcloop1
	jmp	.Lexit
___
$code.=<<___;
.align	16
.Lcloop1:
	add	$TX[0]#b,$YY#b
	movzb	($dat,$YY),$TY#d
	movb	$TX[0]#b,($dat,$YY)
	movb	$TY#b,($dat,$XX[0])
	add	$TX[0]#b,$TY#b
	add	\$1,$XX[0]#b
	movzb	$TY#b,$TY#d
	movzb	$XX[0]#b,$XX[0]#d
	movzb	($dat,$TY),$TY#d
	movzb	($dat,$XX[0]),$TX[0]#d
	xorb	($inp),$TY#b
	lea	1($inp),$inp
	movb	$TY#b,($out)
	lea	1($out),$out
	sub	\$1,$len
	jnz	.Lcloop1
	jmp	.Lexit
.size	RC4,.-RC4
___

$idx="%r8";
$ido="%r9";

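# RC4_set_key below implements the standard KSA; as a reference
# sketch, in C terms:
#
#	for (i = 0; i < 256; i++) S[i] = i;
#	for (i = j = 0; i < 256; i++) {
#		j = (j + S[i] + key[i % keylen]) & 255;
#		swap(S[i], S[j]);
#	}
#
# It lays S out as an int array (RC4_INT) or a byte array (RC4_CHAR)
# depending on OPENSSL_ia32cap_P.  In the RC4_CHAR case it stores -1
# at 256($dat), which is what RC4 itself tests to pick its code path,
# and records the bit-30 test at 260($dat), which makes the key-stream
# code fall back to cloop1 [see the Core2 note above].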
$code.=<<___;
.extern	OPENSSL_ia32cap_P
.globl	RC4_set_key
.type	RC4_set_key,\@function,3
.align	16
RC4_set_key:
	lea	8($dat),$dat
	lea	($inp,$len),$inp
	neg	$len
	mov	$len,%rcx
	xor	%eax,%eax
	xor	$ido,$ido
	xor	%r10,%r10
	xor	%r11,%r11

	mov	OPENSSL_ia32cap_P(%rip),$idx#d
	bt	\$20,$idx#d
	jnc	.Lw1stloop
	bt	\$30,$idx#d
	setc	$ido#b
	mov	$ido#d,260($dat)
	jmp	.Lc1stloop

.align	16
.Lw1stloop:
	mov	%eax,($dat,%rax,4)
	add	\$1,%al
	jnc	.Lw1stloop

	xor	$ido,$ido
	xor	$idx,$idx
.align	16
.Lw2ndloop:
	mov	($dat,$ido,4),%r10d
	add	($inp,$len,1),$idx#b
	add	%r10b,$idx#b
	add	\$1,$len
	mov	($dat,$idx,4),%r11d
	cmovz	%rcx,$len
	mov	%r10d,($dat,$idx,4)
	mov	%r11d,($dat,$ido,4)
	add	\$1,$ido#b
	jnc	.Lw2ndloop
	jmp	.Lexit_key

.align	16
.Lc1stloop:
	mov	%al,($dat,%rax)
	add	\$1,%al
	jnc	.Lc1stloop

	xor	$ido,$ido
	xor	$idx,$idx
.align	16
.Lc2ndloop:
	mov	($dat,$ido),%r10b
	add	($inp,$len),$idx#b
	add	%r10b,$idx#b
	add	\$1,$len
	mov	($dat,$idx),%r11b
	jnz	.Lcnowrap
	mov	%rcx,$len
.Lcnowrap:
	mov	%r10b,($dat,$idx)
	mov	%r11b,($dat,$ido)
	add	\$1,$ido#b
	jnc	.Lc2ndloop
	movl	\$-1,256($dat)

.align	16
.Lexit_key:
	xor	%eax,%eax
	mov	%eax,-8($dat)
	mov	%eax,-4($dat)
	ret
.size	RC4_set_key,.-RC4_set_key

.globl	RC4_options
.type	RC4_options,\@function,0
.align	16
RC4_options:
	.picmeup %rax
	lea	.Lopts-.(%rax),%rax
	mov	OPENSSL_ia32cap_P(%rip),%edx
	bt	\$20,%edx
	jnc	.Ldone
	add	\$12,%rax
	bt	\$30,%edx
	jnc	.Ldone
	add	\$13,%rax
.Ldone:
	ret
.align	64
.Lopts:
.asciz	"rc4(8x,int)"
.asciz	"rc4(8x,char)"
.asciz	"rc4(1x,char)"
.asciz	"RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
.size	RC4_options,.-RC4_options
___

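# The #b/#w/#d suffixes used throughout denote the byte/word/dword
# aliases of the 64-bit registers; the substitution below folds them
# into real register names, e.g. "%r10#d" becomes "%r10d".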
$code =~ s/#([bwd])/$1/gm;

$code =~ s/RC4_set_key/private_RC4_set_key/g
	if (defined($ENV{FIPSCANLIB}) && $ENV{FIPSCANLIB} ne "");

print $code;

close STDOUT;
    367