/external/chromium_org/third_party/openssl/openssl/crypto/modes/asm/

ghash-s390x.pl
     71  $rem_4bit="%r14";
    111  larl $rem_4bit,rem_4bit
    150  larl $rem_4bit,rem_4bit
    199  xg $Zhi,0($rem0,$rem_4bit)
    212  xg $Zhi,0($rem1,$rem_4bit)
    224  xg $Zhi,0($rem0,$rem_4bit)
    234  xg $Zhi,0($rem1,$rem_4bit)
    236  lg $tmp,0($xi,$rem_4bit)
    250  rem_4bit: label
    [all...]

ghash-sparcv9.pl
     59  $rem_4bit="%l4";
     73  rem_4bit: label
     78  .type rem_4bit,#object
     79  .size rem_4bit,(.-rem_4bit)
     92  add %o7,rem_4bit-1b,$rem_4bit
    108  ldx [$rem_4bit+$remi],$rem
    130  ldx [$rem_4bit+$remi],$rem
    145  ldx [$rem_4bit+$remi],$re [all...]

ghash-parisc.pl
     62  $rem_4bit="%r28";
    107  blr %r0,$rem_4bit
    110  andcm $rem_4bit,$rem,$rem_4bit
    112  ldo L\$rem_4bit-L\$pic_gmult($rem_4bit),$rem_4bit
    146  ldd $rem($rem_4bit),$rem
    162  ldd $rem($rem_4bit),$rem
    175  ldd $rem($rem_4bit),$re [all...]

ghash-alpha.pl
     45  $rem_4bit="AT";  # $28
     69  s8addq $remp,$rem_4bit,$remp
     97  s8addq $remp,$rem_4bit,$remp
    115  s8addq $remp,$rem_4bit,$remp
    139  s8addq $remp,$rem_4bit,$remp
    156  s8addq $remp,$rem_4bit,$remp
    181  s8addq $remp,$rem_4bit,$remp
    199  s8addq $remp,$rem_4bit,$remp
    222  s8addq $remp,$rem_4bit,$remp
    235  s8addq $remp,$rem_4bit,$rem
    439  rem_4bit: label
    [all...]

ghash-armv4.pl
     81  $rem_4bit=$inp;  # used in gcm_gmult_4bit
    115  .type rem_4bit,%object
    117  rem_4bit: label
    122  .size rem_4bit,.-rem_4bit
    126  sub $rem_4bit,pc,#8
    127  sub $rem_4bit,$rem_4bit,#32  @ &rem_4bit
    138  sub r12,r12,#48  @ &rem_4bit
    [all...]

ghash-armv4.S
      6  .type rem_4bit,%object
      8  rem_4bit: label
     13  .size rem_4bit,.-rem_4bit
     18  sub r2,r2,#32  @ &rem_4bit
     29  sub r12,r12,#48  @ &rem_4bit
     31  ldmia r12,{r4-r11}  @ copy rem_4bit ...
     51  ldrh r8,[sp,r14]  @ rem_4bit[rem]
     74  ldrh r8,[sp,r12]  @ rem_4bit[rem]
     82  eor r7,r7,r8,lsl#16  @ ^= rem_4bit[rem [all...]

ghash-x86_64.pl
     62  $rem_4bit = "%r11";
    113  xor ($rem_4bit,$rem,8),$Zhi
    128  xor ($rem_4bit,$rem,8),$Zhi
    143  xor ($rem_4bit,$rem,8),$Zhi
    155  xor ($rem_4bit,$rem,8),$Zhi
    175  lea .Lrem_4bit(%rip),$rem_4bit
    193  $rem_8bit=$rem_4bit;

ghash-x86.pl
    335  &static_label("rem_4bit");
    339  $S=12;  # shift factor for rem_4bit
    347  # used to optimize critical path in 'Z.hi ^= rem_4bit[Z.lo&0xf]'.
    350  # Reference to rem_4bit is scheduled so late that I had to >>4
    351  # rem_4bit elements. This resulted in 20-45% procent improvement
    355  my $rem_4bit = "eax";
    384  &pxor ($Zhi,&QWP(0,$rem_4bit,$rem[1],8)) if ($cnt<28);
    394  &mov ($inp,&DWP(4,$rem_4bit,$rem[1],8));  # last rem_4bit[rem]
    401  &shl ($inp,4);  # compensate for rem_4bit[i] being >> [all...]

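The ghash-x86.pl matches above document a scheduling trick: the rem_4bit elements are stored already shifted right by 4 ($S=12 instead of the usual 16), so the table reference can be issued later in the critical path, and a single shl by 4 on the last loaded element compensates. The rescheduling is legal because a logical right shift distributes over XOR. A minimal C check of that identity, with illustrative stand-in values rather than the generated code:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t z = 0xDEADBEEFu;  /* stand-in for the accumulator Z.hi */
        uint32_t r = 0x1C200000u;  /* stand-in for a rem_4bit element   */

        /* Folding r in before a shift equals folding the pre-shifted
         * (>>4) element in after it, so the load can be issued one step
         * later; the last element sees no further shift, hence the
         * compensating "&shl($inp,4)" in the matches above. */
        assert(((z ^ r) >> 4) == ((z >> 4) ^ (r >> 4)));
        return 0;
    }
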
ghash-ia64.pl
     53  # &rem_4bit[Zlo&0xf]. It works, because rem_4bit is aligned at 128
    132  add rem_4bitp=rem_4bit#-gcm_gmult_4bit#,rem_4bitp
    414  .type rem_4bit#,\@object
    415  rem_4bit: label
    420  .size rem_4bit#,128

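The comment at ghash-ia64.pl's line 53 relies on rem_4bit being aligned at 128: the table is exactly 16 entries of 8 bytes (cf. .size rem_4bit#,128), so a 128-byte-aligned base has its low 7 bits clear and the scaled nibble index can be merged into the address without a carry. A hedged C illustration of that invariant; the GCC alignment attribute is an assumption here, and the real ia64 code forms the address with its own bit-insertion instructions rather than this C:

    #include <assert.h>
    #include <stdint.h>

    /* 16 x 8-byte entries = 128 bytes; 128-byte alignment keeps the low
     * 7 bits of the base zero, so OR-ing in the scaled index equals
     * adding it -- no carry can reach the base address bits. */
    static const uint64_t rem_4bit[16] __attribute__((aligned(128)));

    int main(void)
    {
        uintptr_t base = (uintptr_t)rem_4bit;
        for (unsigned i = 0; i < 16; i++)
            assert((base | (i << 3)) == base + (i << 3));
        return 0;
    }
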
/external/openssl/crypto/modes/asm/

ghash-s390x.pl
     71  $rem_4bit="%r14";
    111  larl $rem_4bit,rem_4bit
    150  larl $rem_4bit,rem_4bit
    199  xg $Zhi,0($rem0,$rem_4bit)
    212  xg $Zhi,0($rem1,$rem_4bit)
    224  xg $Zhi,0($rem0,$rem_4bit)
    234  xg $Zhi,0($rem1,$rem_4bit)
    236  lg $tmp,0($xi,$rem_4bit)
    250  rem_4bit: label
    [all...]

ghash-sparcv9.pl
     59  $rem_4bit="%l4";
     73  rem_4bit: label
     78  .type rem_4bit,#object
     79  .size rem_4bit,(.-rem_4bit)
     92  add %o7,rem_4bit-1b,$rem_4bit
    108  ldx [$rem_4bit+$remi],$rem
    130  ldx [$rem_4bit+$remi],$rem
    145  ldx [$rem_4bit+$remi],$re [all...]

ghash-parisc.pl
     62  $rem_4bit="%r28";
    107  blr %r0,$rem_4bit
    110  andcm $rem_4bit,$rem,$rem_4bit
    112  ldo L\$rem_4bit-L\$pic_gmult($rem_4bit),$rem_4bit
    146  ldd $rem($rem_4bit),$rem
    162  ldd $rem($rem_4bit),$rem
    175  ldd $rem($rem_4bit),$re [all...]

ghash-armv4.pl
     95  $rem_4bit=$inp;  # used in gcm_gmult_4bit
    129  .type rem_4bit,%object
    131  rem_4bit: label
    136  .size rem_4bit,.-rem_4bit
    140  sub $rem_4bit,pc,#8
    141  sub $rem_4bit,$rem_4bit,#32  @ &rem_4bit
    152  sub r12,r12,#48  @ &rem_4bit
    [all...]

ghash-alpha.pl
     45  $rem_4bit="AT";  # $28
     69  s8addq $remp,$rem_4bit,$remp
     97  s8addq $remp,$rem_4bit,$remp
    115  s8addq $remp,$rem_4bit,$remp
    139  s8addq $remp,$rem_4bit,$remp
    156  s8addq $remp,$rem_4bit,$remp
    181  s8addq $remp,$rem_4bit,$remp
    199  s8addq $remp,$rem_4bit,$remp
    222  s8addq $remp,$rem_4bit,$remp
    235  s8addq $remp,$rem_4bit,$rem
    448  rem_4bit: label
    [all...]

ghash-armv4.S
      6  .type rem_4bit,%object
      8  rem_4bit: label
     13  .size rem_4bit,.-rem_4bit
     18  sub r2,r2,#32  @ &rem_4bit
     29  sub r12,r12,#48  @ &rem_4bit
     31  ldmia r12,{r4-r11}  @ copy rem_4bit ...
     51  ldrh r8,[sp,r14]  @ rem_4bit[rem]
     74  ldrh r8,[sp,r12]  @ rem_4bit[rem]
     82  eor r7,r7,r8,lsl#16  @ ^= rem_4bit[rem [all...]

ghash-x86_64.pl
     62  $rem_4bit = "%r11";
    113  xor ($rem_4bit,$rem,8),$Zhi
    128  xor ($rem_4bit,$rem,8),$Zhi
    143  xor ($rem_4bit,$rem,8),$Zhi
    155  xor ($rem_4bit,$rem,8),$Zhi
    175  lea .Lrem_4bit(%rip),$rem_4bit
    193  $rem_8bit=$rem_4bit;

ghash-x86.pl
    335  &static_label("rem_4bit");
    339  $S=12;  # shift factor for rem_4bit
    347  # used to optimize critical path in 'Z.hi ^= rem_4bit[Z.lo&0xf]'.
    350  # Reference to rem_4bit is scheduled so late that I had to >>4
    351  # rem_4bit elements. This resulted in 20-45% procent improvement
    355  my $rem_4bit = "eax";
    384  &pxor ($Zhi,&QWP(0,$rem_4bit,$rem[1],8)) if ($cnt<28);
    394  &mov ($inp,&DWP(4,$rem_4bit,$rem[1],8));  # last rem_4bit[rem]
    401  &shl ($inp,4);  # compensate for rem_4bit[i] being >> [all...]

ghash-ia64.pl
     53  # &rem_4bit[Zlo&0xf]. It works, because rem_4bit is aligned at 128
    132  add rem_4bitp=rem_4bit#-gcm_gmult_4bit#,rem_4bitp
    414  .type rem_4bit#,\@object
    415  rem_4bit: label
    420  .size rem_4bit#,128

/external/chromium_org/third_party/openssl/openssl/crypto/modes/

gcm128.c
    329  static const size_t rem_4bit[16] = { variable
    354  Z.hi ^= rem_4bit[rem];
    356  Z.hi ^= (u64)rem_4bit[rem]<<32;
    371  Z.hi ^= rem_4bit[rem];
    373  Z.hi ^= (u64)rem_4bit[rem]<<32;
    430  Z.hi ^= rem_4bit[rem];
    432  Z.hi ^= (u64)rem_4bit[rem]<<32;
    448  Z.hi ^= rem_4bit[rem];
    450  Z.hi ^= (u64)rem_4bit[rem]<<32;
    [all...]

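The gcm128.c matches show the same reduction XOR in two forms: Z.hi ^= rem_4bit[rem] where size_t is 64 bits wide, and Z.hi ^= (u64)rem_4bit[rem]<<32 where it is 32 bits. That works because each table element is a 16-bit constant parked in the top 16 bits of a size_t. A sketch of such a definition follows; the PACK macro and the constants are recalled from OpenSSL's gcm128.c rather than visible in the snippets, so treat them as illustrative:

    #include <stddef.h>

    /* Park a 16-bit reduction constant in the top 16 bits of a size_t,
     * so one table serves both 32- and 64-bit builds.  Recalled from
     * OpenSSL's gcm128.c; verify against the actual tree. */
    #define PACK(s) ((size_t)(s) << (sizeof(size_t) * 8 - 16))

    static const size_t rem_4bit[16] = {
        PACK(0x0000), PACK(0x1C20), PACK(0x3840), PACK(0x2460),
        PACK(0x7080), PACK(0x6CA0), PACK(0x48C0), PACK(0x54E0),
        PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
        PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0)
    };

On an LP64 build, PACK(0x1C20) is 0x1C20 << 48, so the plain XOR folds it straight into the top of the 64-bit Z.hi; on a 32-bit build the constant lands in bits 16-31 of the 32-bit size_t, and widening to u64 plus <<32 moves it into the same bit positions 48-63.
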
/external/openssl/crypto/modes/

gcm128.c
    329  static const size_t rem_4bit[16] = { variable
    354  Z.hi ^= rem_4bit[rem];
    356  Z.hi ^= (u64)rem_4bit[rem]<<32;
    371  Z.hi ^= rem_4bit[rem];
    373  Z.hi ^= (u64)rem_4bit[rem]<<32;
    430  Z.hi ^= rem_4bit[rem];
    432  Z.hi ^= (u64)rem_4bit[rem]<<32;
    448  Z.hi ^= rem_4bit[rem];
    450  Z.hi ^= (u64)rem_4bit[rem]<<32;
    [all...]

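For orientation, a hedged C sketch of the 4-bit gcm_gmult loop these XORs live in: Z is shifted right one nibble per step, and the four bits falling off Z.lo are folded back in at the top via rem_4bit. The u128 struct, Htable layout, and traversal order below are modeled on the snippets and the general shape of gcm128.c, not a verified drop-in, and only the 64-bit form of the reduction is shown:

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } u128;

    /* Htable holds the 16 precomputed nibble multiples of the hash key H. */
    static void gmult_4bit_sketch(u128 *Z, const u128 Htable[16],
                                  const uint8_t Xi[16],
                                  const uint64_t rem_4bit[16])
    {
        unsigned nlo = Xi[15] & 0xf, nhi = Xi[15] >> 4;
        *Z = Htable[nlo];

        for (int cnt = 15;;) {
            /* shift Z right one nibble, reduce the bits shifted out */
            unsigned rem = (unsigned)Z->lo & 0xf;
            Z->lo = (Z->hi << 60) | (Z->lo >> 4);
            Z->hi = (Z->hi >> 4) ^ rem_4bit[rem]; /* Z.hi ^= rem_4bit[rem]; */
            Z->hi ^= Htable[nhi].hi;
            Z->lo ^= Htable[nhi].lo;

            if (--cnt < 0)
                break;
            nlo = Xi[cnt] & 0xf;
            nhi = Xi[cnt] >> 4;

            rem = (unsigned)Z->lo & 0xf;
            Z->lo = (Z->hi << 60) | (Z->lo >> 4);
            Z->hi = (Z->hi >> 4) ^ rem_4bit[rem];
            Z->hi ^= Htable[nlo].hi;
            Z->lo ^= Htable[nlo].lo;
        }
    }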