#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# May 2011
#
# The module implements the bn_GF2m_mul_2x2 polynomial multiplication
# used in bn_gf2m.c. For the time being it is a fairly mechanical
# port from C... except that it has two code paths: pure integer
# code suitable for any ARMv4 and later CPU, and NEON code suitable
# for ARMv7. The pure integer 1x1 multiplication subroutine runs in
# ~45 cycles on a dual-issue core such as Cortex-A8, which is ~50%
# faster than compiler-generated code. For ECDH and ECDSA verify (but
# not for ECDSA sign) this means a 25%-45% improvement depending on
# key length, more for longer keys. Even though the NEON 1x1
# multiplication runs in even fewer cycles, ~30, the improvement is
# measurable only on longer keys. One has to optimize code elsewhere
# to get the NEON glow...
#
# April 2014
#
# Double bn_GF2m_mul_2x2 performance by using the algorithm from the
# paper referenced below, which improves ECDH and ECDSA verify
# benchmarks by 18-40%.
#
# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf

while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";

$code=<<___;
#include "arm_arch.h"

.text
.code	32

#if __ARM_ARCH__>=7
.fpu	neon
#endif
___
################
# private interface to mul_1x1_ialu
#
$a="r1";
$b="r0";

($a0,$a1,$a2,$a12,$a4,$a14)=
($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);

$mask="r12";

$code.=<<___;
.type	mul_1x1_ialu,%function
.align	5
mul_1x1_ialu:
	mov	$a0,#0
	bic	$a1,$a,#3<<30		@ a1=a&0x3fffffff
	str	$a0,[sp,#0]		@ tab[0]=0
	add	$a2,$a1,$a1		@ a2=a1<<1
	str	$a1,[sp,#4]		@ tab[1]=a1
	eor	$a12,$a1,$a2		@ a1^a2
	str	$a2,[sp,#8]		@ tab[2]=a2
	mov	$a4,$a1,lsl#2		@ a4=a1<<2
	str	$a12,[sp,#12]		@ tab[3]=a1^a2
	eor	$a14,$a1,$a4		@ a1^a4
	str	$a4,[sp,#16]		@ tab[4]=a4
	eor	$a0,$a2,$a4		@ a2^a4
	str	$a14,[sp,#20]		@ tab[5]=a1^a4
	eor	$a12,$a12,$a4		@ a1^a2^a4
	str	$a0,[sp,#24]		@ tab[6]=a2^a4
	and	$i0,$mask,$b,lsl#2
	str	$a12,[sp,#28]		@ tab[7]=a1^a2^a4

	and	$i1,$mask,$b,lsr#1
	ldr	$lo,[sp,$i0]		@ tab[b       & 0x7]
	and	$i0,$mask,$b,lsr#4
	ldr	$t1,[sp,$i1]		@ tab[b >>  3 & 0x7]
	and	$i1,$mask,$b,lsr#7
	ldr	$t0,[sp,$i0]		@ tab[b >>  6 & 0x7]
	eor	$lo,$lo,$t1,lsl#3	@ stall
	mov	$hi,$t1,lsr#29
	ldr	$t1,[sp,$i1]		@ tab[b >>  9 & 0x7]

	and	$i0,$mask,$b,lsr#10
	eor	$lo,$lo,$t0,lsl#6
	eor	$hi,$hi,$t0,lsr#26
	ldr	$t0,[sp,$i0]		@ tab[b >> 12 & 0x7]

	and	$i1,$mask,$b,lsr#13
	eor	$lo,$lo,$t1,lsl#9
	eor	$hi,$hi,$t1,lsr#23
	ldr	$t1,[sp,$i1]		@ tab[b >> 15 & 0x7]

	and	$i0,$mask,$b,lsr#16
	eor	$lo,$lo,$t0,lsl#12
	eor	$hi,$hi,$t0,lsr#20
	ldr	$t0,[sp,$i0]		@ tab[b >> 18 & 0x7]

	and	$i1,$mask,$b,lsr#19
	eor	$lo,$lo,$t1,lsl#15
	eor	$hi,$hi,$t1,lsr#17
	ldr	$t1,[sp,$i1]		@ tab[b >> 21 & 0x7]

	and	$i0,$mask,$b,lsr#22
	eor	$lo,$lo,$t0,lsl#18
	eor	$hi,$hi,$t0,lsr#14
	ldr	$t0,[sp,$i0]		@ tab[b >> 24 & 0x7]

	and	$i1,$mask,$b,lsr#25
	eor	$lo,$lo,$t1,lsl#21
	eor	$hi,$hi,$t1,lsr#11
	ldr	$t1,[sp,$i1]		@ tab[b >> 27 & 0x7]

	tst	$a,#1<<30
	and	$i0,$mask,$b,lsr#28
	eor	$lo,$lo,$t0,lsl#24
	eor	$hi,$hi,$t0,lsr#8
	ldr	$t0,[sp,$i0]		@ tab[b >> 30      ]

	eorne	$lo,$lo,$b,lsl#30
	eorne	$hi,$hi,$b,lsr#2
	tst	$a,#1<<31
	eor	$lo,$lo,$t1,lsl#27
	eor	$hi,$hi,$t1,lsr#5
	eorne	$lo,$lo,$b,lsl#31
	eorne	$hi,$hi,$b,lsr#1
	eor	$lo,$lo,$t0,lsl#30
	eor	$hi,$hi,$t0,lsr#2

	mov	pc,lr
.size	mul_1x1_ialu,.-mul_1x1_ialu
___
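
################
# Reference model of the subroutine above: an illustrative helper
# that is not part of the original module and is never called by the
# generator. It computes, bit-serially, the same (hi,lo) pair that
# mul_1x1_ialu derives from its 8-entry table of small multiples of
# a&0x3fffffff plus the two conditional corrections for bits 30 and
# 31 of a (multiples involving those bits would overflow 32 bits, so
# the table cannot hold them). Assumes 64-bit Perl integers.
#
sub ref_mul_1x1 {
	my ($x,$y)=@_;			# 32-bit polynomials over GF(2)
	my ($hi,$lo)=(0,0);
	for my $i (0..31) {
		next unless ($y>>$i)&1;		# bit i of y set?
		$lo ^= ($x<<$i)&0xffffffff;	# low 32 bits of x<<i
		$hi ^= $x>>(32-$i) if ($i);	# bits spilled into hi
	}
	return ($hi,$lo);
}
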
################
# void	bn_GF2m_mul_2x2(BN_ULONG *r,
#	BN_ULONG a1,BN_ULONG a0,
#	BN_ULONG b1,BN_ULONG b0);	# r[3..0]=a1a0·b1b0
{
my ($r,$t0,$t1,$t2,$t3)=map("q$_",(0..3,8..12));
my ($a,$b,$k48,$k32,$k16)=map("d$_",(26..31));

$code.=<<___;
.global	bn_GF2m_mul_2x2
.type	bn_GF2m_mul_2x2,%function
.align	5
bn_GF2m_mul_2x2:
#if __ARM_ARCH__>=7
	ldr	r12,.LOPENSSL_armcap
.Lpic:	ldr	r12,[pc,r12]
	tst	r12,#1
	beq	.Lialu

	ldr		r12, [sp]		@ 5th argument
	vmov.32		$a, r2, r1
	vmov.32		$b, r12, r3
	vmov.i64	$k48, #0x0000ffffffffffff
	vmov.i64	$k32, #0x00000000ffffffff
	vmov.i64	$k16, #0x000000000000ffff

	vext.8		$t0#lo, $a, $a, #1	@ A1
	vmull.p8	$t0, $t0#lo, $b		@ F = A1*B
	vext.8		$r#lo, $b, $b, #1	@ B1
	vmull.p8	$r, $a, $r#lo		@ E = A*B1
	vext.8		$t1#lo, $a, $a, #2	@ A2
	vmull.p8	$t1, $t1#lo, $b		@ H = A2*B
	vext.8		$t3#lo, $b, $b, #2	@ B2
	vmull.p8	$t3, $a, $t3#lo		@ G = A*B2
	vext.8		$t2#lo, $a, $a, #3	@ A3
	veor		$t0, $t0, $r		@ L = E + F
	vmull.p8	$t2, $t2#lo, $b		@ J = A3*B
	vext.8		$r#lo, $b, $b, #3	@ B3
	veor		$t1, $t1, $t3		@ M = G + H
	vmull.p8	$r, $a, $r#lo		@ I = A*B3
	veor		$t0#lo, $t0#lo, $t0#hi	@ t0 = (L) (P0 + P1) << 8
	vand		$t0#hi, $t0#hi, $k48
	vext.8		$t3#lo, $b, $b, #4	@ B4
	veor		$t1#lo, $t1#lo, $t1#hi	@ t1 = (M) (P2 + P3) << 16
	vand		$t1#hi, $t1#hi, $k32
	vmull.p8	$t3, $a, $t3#lo		@ K = A*B4
	veor		$t2, $t2, $r		@ N = I + J
	veor		$t0#lo, $t0#lo, $t0#hi
	veor		$t1#lo, $t1#lo, $t1#hi
	veor		$t2#lo, $t2#lo, $t2#hi	@ t2 = (N) (P4 + P5) << 24
	vand		$t2#hi, $t2#hi, $k16
	vext.8		$t0, $t0, $t0, #15
	veor		$t3#lo, $t3#lo, $t3#hi	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	$t3#hi, #0
	vext.8		$t1, $t1, $t1, #14
	veor		$t2#lo, $t2#lo, $t2#hi
	vmull.p8	$r, $a, $b		@ D = A*B
	vext.8		$t3, $t3, $t3, #12
	vext.8		$t2, $t2, $t2, #13
	veor		$t0, $t0, $t1
	veor		$t2, $t2, $t3
	veor		$r, $r, $t0
	veor		$r, $r, $t2

	vst1.32		{$r}, [r0]
	ret		@ bx lr
.align	4
.Lialu:
#endif
___
}
$ret="r10";	# reassigned 1st argument
$code.=<<___;
	stmdb	sp!,{r4-r10,lr}
	mov	$ret,r0			@ reassign 1st argument
	mov	$b,r3			@ $b=b1
	ldr	r3,[sp,#32]		@ load b0
	mov	$mask,#7<<2
	sub	sp,sp,#32		@ allocate tab[8]

	bl	mul_1x1_ialu		@ a1·b1
	str	$lo,[$ret,#8]
	str	$hi,[$ret,#12]

	eor	$b,$b,r3		@ flip b0 and b1
	eor	$a,$a,r2		@ flip a0 and a1
	eor	r3,r3,$b
	eor	r2,r2,$a
	eor	$b,$b,r3
	eor	$a,$a,r2
	bl	mul_1x1_ialu		@ a0·b0
	str	$lo,[$ret]
	str	$hi,[$ret,#4]

	eor	$a,$a,r2
	eor	$b,$b,r3
	bl	mul_1x1_ialu		@ (a1+a0)·(b1+b0)
___
@r=map("r$_",(6..9));
$code.=<<___;
	ldmia	$ret,{@r[0]-@r[3]}
	eor	$lo,$lo,$hi
	eor	$hi,$hi,@r[1]
	eor	$lo,$lo,@r[0]
	eor	$hi,$hi,@r[2]
	eor	$lo,$lo,@r[3]
	eor	$hi,$hi,@r[3]
	str	$hi,[$ret,#8]
	eor	$lo,$lo,$hi
	add	sp,sp,#32		@ destroy tab[8]
	str	$lo,[$ret,#4]

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r10,pc}
#else
	ldmia	sp!,{r4-r10,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
#if __ARM_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-(.Lpic+8)
#endif
.asciz	"GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	5

.comm	OPENSSL_armcap_P,4,4
___
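
################
# Reference model of the 2x2 Karatsuba combination performed by the
# integer path above (again an illustrative helper, not part of the
# original module and never called). Three 1x1 products suffice
# because over GF(2), with X=x^32 and addition being xor,
#
#	(a1*X+a0)*(b1*X+b0) = a1b1*X^2
#		+ ((a1+a0)*(b1+b0) + a1b1 + a0b0)*X + a0b0;
#
# the eor cascade after the third bl above folds the middle term into
# r[2] and r[1], exactly as the return expression of this sub does.
#
sub ref_mul_2x2 {
	my ($a1,$a0,$b1,$b0)=@_;
	my ($h1,$h0)=ref_mul_1x1($a1,$b1);		# a1·b1
	my ($l1,$l0)=ref_mul_1x1($a0,$b0);		# a0·b0
	my ($m1,$m0)=ref_mul_1x1($a1^$a0,$b1^$b0);	# (a1+a0)·(b1+b0)
	return ($h1,			# r[3]
		$h0^$m1^$h1^$l1,	# r[2] ^= hi word of middle term
		$l1^$m0^$l0^$h0,	# r[1] ^= lo word of middle term
		$l0);			# r[0]
}
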

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/\bret\b/bx	lr/go		or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4

	print $_,"\n";
}
close STDOUT;	# enforce flush
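
# A worked example of the post-processing loop: the q-register
# substitution lets the NEON code name the two d-register halves of a
# q register, so with $t0=q1 and $a=d26 the line
#
#	vext.8	q1#lo, d26, d26, #1
#
# is printed as "vext.8 d2, d26, d26, #1" (q1's halves are d2 and d3).
# "ret" is spelled out as "bx lr", while a literal "bx lr" (the ARMv4
# epilogue above) is emitted as ".word 0xe12fff1e", its instruction
# encoding, so that the output still assembles with -march=armv4,
# where the bx mnemonic is not accepted. The backtick rule would eval
# embedded Perl snippets, though none are used in this module.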