#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# April 2010
#
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [plus a 32-byte shared
# table]. There is no experimental performance data available yet.
# The only approximation that can be made at this point is based on
# code size. The inner loop is 32 instructions long and on a
# single-issue core should execute in <40 cycles. Having verified that
# gcc 3.4 didn't unroll the corresponding loop, this assembler loop
# body was found to be ~3x smaller than the compiler-generated one...
#
# July 2010
#
# Rescheduling for the dual-issue pipeline resulted in an 8.5%
# improvement on the Cortex A8 core and ~25 cycles per processed byte
# (which was observed to be ~3 times faster than gcc-generated code:-)
#
# February 2011
#
# Profiler-assisted and platform-specific optimization resulted in a
# 7% improvement on the Cortex A8 core and ~23.5 cycles per byte.
#
# March 2011
#
# Add a NEON implementation featuring polynomial multiplication, i.e.
# no lookup tables involved. On Cortex A8 it was measured to process
# one byte in 15 cycles, or 55% faster than the integer-only code.
#
# April 2014
#
# Switch to the multiplication algorithm suggested in the paper
# referred to below and combine it with the reduction algorithm from
# the x86 module. The performance improvement over the previous
# version varies from 65% on Snapdragon S4 to 110% on Cortex A9. In
# absolute terms Cortex A8 processes one byte in 8.45 cycles, A9 in
# 10.2, and Snapdragon S4 in 9.33.
#
# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf

# ====================================================================
# Note about the "528B" variant. In the ARM case it makes less sense
# to implement it, for the following reasons:
#
# - the performance improvement won't be anywhere near 50%, because
#   the 128-bit shift operation is neatly fused with the 128-bit xor
#   here, and the "528B" variant would eliminate only 4-5 instructions
#   out of 32 in the inner loop (meaning that the estimated
#   improvement is ~15%);
# - ARM-based systems are often embedded ones and the extra memory
#   consumption might be unappreciated (for so little improvement);
#
# Byte order [in]dependence. =========================================
#
# The caller is expected to maintain a specific *dword* order in
# Htable, namely with the *least* significant dword of the 128-bit
# value at the *lower* address. This differs completely from the C
# code and has everything to do with the ldm instruction and the order
# in which dwords are "consumed" by the algorithm. *Byte* order within
# these dwords is in turn whatever the *native* byte order of the
# current platform is. See gcm128.c for a working example...
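#
# For orientation, the integer code below follows, in spirit, the
# 4-bit routine in gcm128.c. A minimal, unoptimized C sketch of the
# single multiplication (gcm_gmult) it computes; the helper name is
# hypothetical and the natural u128 layout is used here rather than
# the dword order the assembly expects (see the note above):
#
#	#include <stdint.h>
#	#include <stddef.h>
#
#	typedef struct { uint64_t hi, lo; } u128;
#
#	static const uint16_t rem_4bit[16] = {
#	    0x0000,0x1C20,0x3840,0x2460,0x7080,0x6CA0,0x48C0,0x54E0,
#	    0xE100,0xFD20,0xD940,0xC560,0x9180,0x8DA0,0xA9C0,0xB5E0 };
#
#	static void gmult_4bit_ref(uint8_t Xi[16], const u128 Htable[16])
#	{
#	    size_t nlo = Xi[15] & 0xf, nhi = Xi[15] >> 4, rem;
#	    u128 Z = Htable[nlo];
#	    int cnt = 14;
#
#	    for (;;) {
#	        /* shift Z right 4 bits, folding the shifted-out bits
#	         * back in via rem_4bit (entries pre-shifted into the
#	         * top 16 bits, as the asm's lsl#16 does), then
#	         * accumulate Htable[nhi] */
#	        rem  = (size_t)Z.lo & 0xf;
#	        Z.lo = (Z.hi << 60) | (Z.lo >> 4);
#	        Z.hi = (Z.hi >> 4) ^ ((uint64_t)rem_4bit[rem] << 48);
#	        Z.hi ^= Htable[nhi].hi;
#	        Z.lo ^= Htable[nhi].lo;
#
#	        if (cnt < 0) break;
#
#	        /* same half-step for the low nibble of the next byte */
#	        nlo = Xi[cnt] & 0xf;
#	        nhi = Xi[cnt--] >> 4;
#	        rem  = (size_t)Z.lo & 0xf;
#	        Z.lo = (Z.hi << 60) | (Z.lo >> 4);
#	        Z.hi = (Z.hi >> 4) ^ ((uint64_t)rem_4bit[rem] << 48);
#	        Z.hi ^= Htable[nlo].hi;
#	        Z.lo ^= Htable[nlo].lo;
#	    }
#	    /* finally Z is stored back to Xi in big-endian byte order */
#	}
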
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";

$Xi="r0";	# argument block
$Htbl="r1";
$inp="r2";
$len="r3";

$Zll="r4";	# variables
$Zlh="r5";
$Zhl="r6";
$Zhh="r7";
$Tll="r8";
$Tlh="r9";
$Thl="r10";
$Thh="r11";
$nlo="r12";
################# r13 is stack pointer
$nhi="r14";
################# r15 is program counter

$rem_4bit=$inp;	# used in gcm_gmult_4bit
$cnt=$len;

sub Zsmash() {
  my $i=12;
  my @args=@_;
  for ($Zll,$Zlh,$Zhl,$Zhh) {
    $code.=<<___;
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	$_,$_
	str	$_,[$Xi,#$i]
#elif defined(__ARMEB__)
	str	$_,[$Xi,#$i]
#else
	mov	$Tlh,$_,lsr#8
	strb	$_,[$Xi,#$i+3]
	mov	$Thl,$_,lsr#16
	strb	$Tlh,[$Xi,#$i+2]
	mov	$Thh,$_,lsr#24
	strb	$Thl,[$Xi,#$i+1]
	strb	$Thh,[$Xi,#$i]
#endif
___
    $code.="\t".shift(@args)."\n";
    $i-=4;
  }
}

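# Zsmash writes Z back to Xi most-significant word first: rev+str on
# little-endian ARMv7, plain str on big-endian, byte stores otherwise.
# A C analogue of the fallback branch (illustrative sketch only, one
# 32-bit word at a time; the helper name is hypothetical):
#
#	#include <stdint.h>
#
#	/* store v to p[0..3] in big-endian order, independent of the
#	 * host byte order, which is what the strb sequence achieves */
#	static void store_be32(uint8_t *p, uint32_t v)
#	{
#	    p[3] = (uint8_t)v;
#	    p[2] = (uint8_t)(v >> 8);
#	    p[1] = (uint8_t)(v >> 16);
#	    p[0] = (uint8_t)(v >> 24);
#	}
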
$code=<<___;
#if defined(__arm__)
#include "arm_arch.h"

.syntax	unified

.text
.code	32

.type	rem_4bit,%object
.align	5
rem_4bit:
	.short	0x0000,0x1C20,0x3840,0x2460
	.short	0x7080,0x6CA0,0x48C0,0x54E0
	.short	0xE100,0xFD20,0xD940,0xC560
	.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit

.type	rem_4bit_get,%function
rem_4bit_get:
	sub	$rem_4bit,pc,#8
	sub	$rem_4bit,$rem_4bit,#32	@ &rem_4bit
	b	.Lrem_4bit_got
	nop
.size	rem_4bit_get,.-rem_4bit_get

.global	gcm_ghash_4bit
.hidden	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
gcm_ghash_4bit:
	sub	r12,pc,#8
	add	$len,$inp,$len		@ $len to point at the end
	stmdb	sp!,{r3-r11,lr}		@ save $len/end too
	sub	r12,r12,#48		@ &rem_4bit

	ldmia	r12,{r4-r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4-r11}		@ ... to stack

	ldrb	$nlo,[$inp,#15]
	ldrb	$nhi,[$Xi,#15]
.Louter:
	eor	$nlo,$nlo,$nhi
	and	$nhi,$nlo,#0xf0
	and	$nlo,$nlo,#0x0f
	mov	$cnt,#14

	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
	add	$Thh,$Htbl,$nhi
	ldrb	$nlo,[$inp,#14]

	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	add	$nhi,$nhi,$nhi
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[sp,$nhi]		@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	ldrb	$nhi,[$Xi,#14]
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	eor	$nlo,$nlo,$nhi
	and	$nhi,$nlo,#0xf0
	and	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tll,lsl#16

.Linner:
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	subs	$cnt,$cnt,#1
	add	$nlo,$nlo,$nlo
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[sp,$nlo]		@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
	ldrbpl	$nlo,[$inp,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4

	add	$Thh,$Htbl,$nhi
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	add	$nhi,$nhi,$nhi
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrbpl	$Tll,[$Xi,$cnt]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	ldrh	$Tlh,[sp,$nhi]
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eorpl	$nlo,$nlo,$Tll
	eor	$Zhh,$Thh,$Zhh,lsr#4
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tlh,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Linner

	ldr	$len,[sp,#32]		@ re-load $len/end
	add	$inp,$inp,#16
	mov	$nhi,$Zll
___
	&Zsmash("cmp\t$inp,$len","ldrbne\t$nlo,[$inp,#15]");
$code.=<<___;
	bne	.Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_ghash_4bit,.-gcm_ghash_4bit

.global	gcm_gmult_4bit
.hidden	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
gcm_gmult_4bit:
	stmdb	sp!,{r4-r11,lr}
	ldrb	$nlo,[$Xi,#15]
	b	rem_4bit_get
.Lrem_4bit_got:
	and	$nhi,$nlo,#0xf0
	and	$nlo,$nlo,#0x0f
	mov	$cnt,#14

	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
	ldrb	$nlo,[$Xi,#14]

	add	$Thh,$Htbl,$nhi
	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	add	$nhi,$nhi,$nhi
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	and	$nhi,$nlo,#0xf0
	eor	$Zhh,$Zhh,$Tll,lsl#16
	and	$nlo,$nlo,#0x0f

.Loop:
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	subs	$cnt,$cnt,#1
	add	$nlo,$nlo,$nlo
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[$rem_4bit,$nlo]	@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
	ldrbpl	$nlo,[$Xi,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4

	add	$Thh,$Htbl,$nhi
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	add	$nhi,$nhi,$nhi
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Loop
___
	&Zsmash();
$code.=<<___;
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
___
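# The rem_4bit table above holds the reduction constants folded in by
# .Linner and .Loop via "lsl#16": entry i is the carry-less (GF(2))
# product of i and 0x1C20, where 0x1C20 is 0xE1 (the defining byte of
# the GHASH polynomial) shifted left by 5. A quick generator sketch,
# for verification only, not part of this module:
#
#	#include <stdint.h>
#	#include <stdio.h>
#
#	int main(void)
#	{
#	    for (int i = 0; i < 16; i++) {
#	        uint16_t v = 0;
#	        for (int b = 0; b < 4; b++)	/* xor 0x1C20<<b per set bit */
#	            if (i & (1 << b))
#	                v ^= (uint16_t)(0x1C20 << b);
#	        printf("0x%04X%s", v, (i & 3) == 3 ? "\n" : ",");
#	    }
#	    return 0;
#	}
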
{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3)=map("q$_",(8..12));
my ($Hlo,$Hhi,$Hhl,$k48,$k32,$k16)=map("d$_",(26..31));

sub clmul64x64 {
my ($r,$a,$b)=@_;
$code.=<<___;
	vext.8		$t0#lo, $a, $a, #1	@ A1
	vmull.p8	$t0, $t0#lo, $b		@ F = A1*B
	vext.8		$r#lo, $b, $b, #1	@ B1
	vmull.p8	$r, $a, $r#lo		@ E = A*B1
	vext.8		$t1#lo, $a, $a, #2	@ A2
	vmull.p8	$t1, $t1#lo, $b		@ H = A2*B
	vext.8		$t3#lo, $b, $b, #2	@ B2
	vmull.p8	$t3, $a, $t3#lo		@ G = A*B2
	vext.8		$t2#lo, $a, $a, #3	@ A3
	veor		$t0, $t0, $r		@ L = E + F
	vmull.p8	$t2, $t2#lo, $b		@ J = A3*B
	vext.8		$r#lo, $b, $b, #3	@ B3
	veor		$t1, $t1, $t3		@ M = G + H
	vmull.p8	$r, $a, $r#lo		@ I = A*B3
	veor		$t0#lo, $t0#lo, $t0#hi	@ t0 = (L) (P0 + P1) << 8
	vand		$t0#hi, $t0#hi, $k48
	vext.8		$t3#lo, $b, $b, #4	@ B4
	veor		$t1#lo, $t1#lo, $t1#hi	@ t1 = (M) (P2 + P3) << 16
	vand		$t1#hi, $t1#hi, $k32
	vmull.p8	$t3, $a, $t3#lo		@ K = A*B4
	veor		$t2, $t2, $r		@ N = I + J
	veor		$t0#lo, $t0#lo, $t0#hi
	veor		$t1#lo, $t1#lo, $t1#hi
	veor		$t2#lo, $t2#lo, $t2#hi	@ t2 = (N) (P4 + P5) << 24
	vand		$t2#hi, $t2#hi, $k16
	vext.8		$t0, $t0, $t0, #15
	veor		$t3#lo, $t3#lo, $t3#hi	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	$t3#hi, #0
	vext.8		$t1, $t1, $t1, #14
	veor		$t2#lo, $t2#lo, $t2#hi
	vmull.p8	$r, $a, $b		@ D = A*B
	vext.8		$t3, $t3, $t3, #12
	vext.8		$t2, $t2, $t2, #13
	veor		$t0, $t0, $t1
	veor		$t2, $t2, $t3
	veor		$r, $r, $t0
	veor		$r, $r, $t2
___
}

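# clmul64x64 assembles a 64x64->128-bit carry-less product out of
# eight 8x8->16-bit polynomial multiplications (vmull.p8) of
# byte-rotated operands, following the Câmara-Gouvêa-López-Dahab paper
# cited above. A plain-C reference for the value it must produce (a
# bit-at-a-time sketch with a hypothetical name, not the NEON method):
#
#	#include <stdint.h>
#
#	/* r[0] = low, r[1] = high 64 bits of the polynomial product */
#	static void clmul64x64_ref(uint64_t r[2], uint64_t a, uint64_t b)
#	{
#	    uint64_t lo = 0, hi = 0;
#	    for (int i = 0; i < 64; i++)
#	        if ((b >> i) & 1) {	/* xor in a<<i, no carries */
#	            lo ^= a << i;
#	            if (i) hi ^= a >> (64 - i);
#	        }
#	    r[0] = lo; r[1] = hi;
#	}
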
$code.=<<___;
#if __ARM_ARCH__>=7
.fpu	neon

.global	gcm_init_neon
.hidden	gcm_init_neon
.type	gcm_init_neon,%function
.align	4
gcm_init_neon:
	vld1.64		$IN#hi,[r1,:64]!	@ load H
	vmov.i8		$t0,#0xe1
	vld1.64		$IN#lo,[r1,:64]
	vshl.i64	$t0#hi,#57
	vshr.u64	$t0#lo,#63		@ t0=0xc2....01
	vdup.8		$t1,$IN#hi[7]
	vshr.u64	$Hlo,$IN#lo,#63
	vshr.s8		$t1,#7			@ broadcast carry bit
	vshl.i64	$IN,$IN,#1
	vand		$t0,$t0,$t1
	vorr		$IN#hi,$Hlo		@ H<<<=1
	veor		$IN,$IN,$t0		@ twisted H
	vstmia		r0,{$IN}

	bx	lr
.size	gcm_init_neon,.-gcm_init_neon

.global	gcm_gmult_neon
.hidden	gcm_gmult_neon
.type	gcm_gmult_neon,%function
.align	4
gcm_gmult_neon:
	vld1.64		$IN#hi,[$Xi,:64]!	@ load Xi
	vld1.64		$IN#lo,[$Xi,:64]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	$IN,$IN
#endif
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing
	mov		$len,#16
	b		.Lgmult_neon
.size	gcm_gmult_neon,.-gcm_gmult_neon

.global	gcm_ghash_neon
.hidden	gcm_ghash_neon
.type	gcm_ghash_neon,%function
.align	4
gcm_ghash_neon:
	vld1.64		$Xl#hi,[$Xi,:64]!	@ load Xi
	vld1.64		$Xl#lo,[$Xi,:64]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	$Xl,$Xl
#endif
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing

.Loop_neon:
	vld1.64		$IN#hi,[$inp]!		@ load inp
	vld1.64		$IN#lo,[$inp]!
#ifdef __ARMEL__
	vrev64.8	$IN,$IN
#endif
	veor		$IN,$Xl			@ inp^=Xi
.Lgmult_neon:
___
	&clmul64x64	($Xl,$Hlo,"$IN#lo");	# H.lo·Xi.lo
$code.=<<___;
	veor		$IN#lo,$IN#lo,$IN#hi	@ Karatsuba pre-processing
___
	&clmul64x64	($Xm,$Hhl,"$IN#lo");	# (H.lo+H.hi)·(Xi.lo+Xi.hi)
	&clmul64x64	($Xh,$Hhi,"$IN#hi");	# H.hi·Xi.hi
$code.=<<___;
	veor		$Xm,$Xm,$Xl		@ Karatsuba post-processing
	veor		$Xm,$Xm,$Xh
	veor		$Xl#hi,$Xl#hi,$Xm#lo
	veor		$Xh#lo,$Xh#lo,$Xm#hi	@ Xh|Xl - 256-bit result

	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	$t1,$Xl,#57		@ 1st phase
	vshl.i64	$t2,$Xl,#62
	veor		$t2,$t2,$t1
	vshl.i64	$t1,$Xl,#63
	veor		$t2,$t2,$t1
	veor		$Xl#hi,$Xl#hi,$t2#lo
	veor		$Xh#lo,$Xh#lo,$t2#hi

	vshr.u64	$t2,$Xl,#1		@ 2nd phase
	veor		$Xh,$Xh,$Xl
	veor		$Xl,$Xl,$t2
	vshr.u64	$t2,$t2,#6
	vshr.u64	$Xl,$Xl,#1
	veor		$Xl,$Xl,$Xh
	veor		$Xl,$Xl,$t2

	subs		$len,#16
	bne		.Loop_neon

#ifdef __ARMEL__
	vrev64.8	$Xl,$Xl
#endif
	sub		$Xi,#16
	vst1.64		$Xl#hi,[$Xi,:64]!	@ write out Xi
	vst1.64		$Xl#lo,[$Xi,:64]

	bx	lr
.size	gcm_ghash_neon,.-gcm_ghash_neon
#endif
___
}
$code.=<<___;
.asciz	"GHASH for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	2

#endif
___

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4

	print $_,"\n";
}
close STDOUT; # enforce flush
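
# ====================================================================
# For reference, callers such as gcm128.c are expected to declare the
# functions emitted above roughly as follows (u128 being a pair of
# 64-bit words, laid out in the dword order discussed at the top;
# consult the headers of the tree this module is built in):
#
#	void gcm_init_neon(u128 Htable[16], const uint64_t H[2]);
#	void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]);
#	void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16],
#	                    const uint8_t *inp, size_t len);
#	void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]);
#	void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16],
#	                    const uint8_t *inp, size_t len);
#
# Typical invocation of this module: perl ghash-armv4.pl ghash-armv4.S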