#include <openssl/arm_arch.h>

@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL
@ instructions are in aesv8-armx.pl.)


.text
#if defined(__thumb2__) || defined(__clang__)
.syntax	unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code	32
#endif

#ifdef  __clang__
#define ldrplb  ldrbpl
#define ldrneb  ldrbne
#endif


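@ rem_4bit[i] holds the reduction constant that folds the low nibble shifted
@ out of Z back into its top bits in the 4-bit table-driven GHASH; the entries
@ are the GF(2) multiples of 0x1C20 and are XORed in shifted left by 16
@ ("^= rem_4bit[rem]" below).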
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0


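@ rem_4bit_get: helper used by gcm_gmult_4bit below; it only loads the address
@ of rem_4bit into r2 and branches back to Lrem_4bit_got.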
#ifdef __thumb2__
.thumb_func	rem_4bit_get
#endif
rem_4bit_get:
#if defined(__thumb2__)
	adr	r2,rem_4bit
#else
	sub	r2,pc,#8+32	@ &rem_4bit
#endif
	b	Lrem_4bit_got
	nop
	nop


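@ void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16],
@                     const uint8_t *inp, size_t len)
@ (assumed prototype, matching the usual OpenSSL/BoringSSL 4-bit GHASH
@ interface): r0 = Xi (current hash value), r1 = Htable (4-bit multiple table
@ of H), r2 = input blocks, r3 = length in bytes (multiple of 16).
@ rem_4bit is copied onto the stack so the [sp,rem] lookups inside Linner
@ stay cheap.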
.globl	_gcm_ghash_4bit
.private_extern	_gcm_ghash_4bit
#ifdef __thumb2__
.thumb_func	_gcm_ghash_4bit
#endif
.align	4
_gcm_ghash_4bit:
#if defined(__thumb2__)
	adr	r12,rem_4bit
#else
	sub	r12,pc,#8+48		@ &rem_4bit
#endif
	add	r3,r2,r3		@ r3 to point at the end
	stmdb	sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr}		@ save r3/end too

	ldmia	r12,{r4,r5,r6,r7,r8,r9,r10,r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11}		@ ... to stack

	ldrb	r12,[r2,#15]
	ldrb	r14,[r0,#15]
Louter:
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	add	r11,r1,r14
	ldrb	r12,[r2,#14]

	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[sp,r14]		@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	ldrb	r14,[r0,#14]
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16

Linner:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[sp,r12]		@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r2,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r8,[r0,r3]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r9,[sp,r14]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
#ifdef	__thumb2__
	it	pl
#endif
	eorpl	r12,r12,r8
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r9,lsl#16	@ ^= rem_4bit[rem]
	bpl	Linner

	ldr	r3,[sp,#32]		@ re-load r3/end
	add	r2,r2,#16
	mov	r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif
	cmp	r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#ifdef __thumb2__
	it	ne
#endif
	ldrneb	r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

	bne	Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif


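@ void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16])
@ (assumed prototype): r0 = Xi, r1 = Htable.  Computes Xi = Xi * H with the
@ same 4-bit table walk as gcm_ghash_4bit, but reads rem_4bit through r2
@ (set up by rem_4bit_get) instead of a stack copy.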
.globl	_gcm_gmult_4bit
.private_extern	_gcm_gmult_4bit
#ifdef __thumb2__
.thumb_func	_gcm_gmult_4bit
#endif
_gcm_gmult_4bit:
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	ldrb	r12,[r0,#15]
	b	rem_4bit_get
Lrem_4bit_got:
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	ldrb	r12,[r0,#14]

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	and	r14,r12,#0xf0
	eor	r7,r7,r8,lsl#16
	and	r12,r12,#0x0f

Loop:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[r2,r12]	@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r0,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	bpl	Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif

#if __ARM_MAX_ARCH__>=7



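@ void gcm_init_neon(u128 Htable[16], const uint64_t H[2])
@ (assumed prototype): r0 = Htable, r1 = H.  Only Htable[0] is written: H is
@ shifted left by one bit and the 0xE1/0xC2 reduction constant is folded in
@ ("twisted H"), the form the NEON multiply below expects.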
.globl	_gcm_init_neon
.private_extern	_gcm_init_neon
#ifdef __thumb2__
.thumb_func	_gcm_init_neon
#endif
.align	4
_gcm_init_neon:
	vld1.64	d7,[r1]!		@ load H
	vmov.i8	q8,#0xe1
	vld1.64	d6,[r1]
	vshl.i64	d17,#57
	vshr.u64	d16,#63		@ t0=0xc2....01
	vdup.8	q9,d7[7]
	vshr.u64	d26,d6,#63
	vshr.s8	q9,#7			@ broadcast carry bit
	vshl.i64	q3,q3,#1
	vand	q8,q8,q9
	vorr	d7,d26		@ H<<<=1
	veor	q3,q3,q8		@ twisted H
	vstmia	r0,{q3}

	bx	lr					@ bx lr


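@ void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16])
@ (assumed prototype): r0 = Xi, r1 = Htable (holding the twisted H written by
@ gcm_init_neon).  Sets r3=16 so the shared Lgmult_neon body runs exactly once.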
.globl	_gcm_gmult_neon
.private_extern	_gcm_gmult_neon
#ifdef __thumb2__
.thumb_func	_gcm_gmult_neon
#endif
.align	4
_gcm_gmult_neon:
	vld1.64	d7,[r0]!		@ load Xi
	vld1.64	d6,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing
	mov	r3,#16
	b	Lgmult_neon


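@ void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16],
@                     const uint8_t *inp, size_t len)
@ (assumed prototype): r0 = Xi, r1 = Htable, r2 = inp, r3 = len (multiple of
@ 16).  Each 16-byte block is XORed into Xi and multiplied by twisted H in
@ Lgmult_neon below.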
.globl	_gcm_ghash_neon
.private_extern	_gcm_ghash_neon
#ifdef __thumb2__
.thumb_func	_gcm_ghash_neon
#endif
.align	4
_gcm_ghash_neon:
	vld1.64	d1,[r0]!		@ load Xi
	vld1.64	d0,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing

Loop_neon:
	vld1.64	d7,[r2]!		@ load inp
	vld1.64	d6,[r2]!
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	veor	q3,q0			@ inp^=Xi
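@ Shared 128x128->128 GF(2^128) multiply of Xi (q3) by twisted H (d26:d27):
@ three 64x64 carry-less products (q0, q1 and q2) are each synthesized from
@ vmull.p8 8x8 polynomial multiplies plus the vext/vand corrections below,
@ then combined Karatsuba-style into a 256-bit product and reduced.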
Lgmult_neon:
	vext.8	d16, d26, d26, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d0, d6, d6, #1	@ B1
	vmull.p8	q0, d26, d0		@ E = A*B1
	vext.8	d18, d26, d26, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d26, d22		@ G = A*B2
	vext.8	d20, d26, d26, #3	@ A3
	veor	q8, q8, q0		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d0, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q0, d26, d0		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d26, d22		@ K = A*B4
	veor	q10, q10, q0		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q0, d26, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q0, q0, q8
	veor	q0, q0, q10
	veor	d6,d6,d7	@ Karatsuba pre-processing
	vext.8	d16, d28, d28, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d2, d6, d6, #1	@ B1
	vmull.p8	q1, d28, d2		@ E = A*B1
	vext.8	d18, d28, d28, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d28, d22		@ G = A*B2
	vext.8	d20, d28, d28, #3	@ A3
	veor	q8, q8, q1		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d2, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q1, d28, d2		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d28, d22		@ K = A*B4
	veor	q10, q10, q1		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q1, d28, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q1, q1, q8
	veor	q1, q1, q10
	vext.8	d16, d27, d27, #1	@ A1
	vmull.p8	q8, d16, d7		@ F = A1*B
	vext.8	d4, d7, d7, #1	@ B1
	vmull.p8	q2, d27, d4		@ E = A*B1
	vext.8	d18, d27, d27, #2	@ A2
	vmull.p8	q9, d18, d7		@ H = A2*B
	vext.8	d22, d7, d7, #2	@ B2
	vmull.p8	q11, d27, d22		@ G = A*B2
	vext.8	d20, d27, d27, #3	@ A3
	veor	q8, q8, q2		@ L = E + F
	vmull.p8	q10, d20, d7		@ J = A3*B
	vext.8	d4, d7, d7, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q2, d27, d4		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d7, d7, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d27, d22		@ K = A*B4
	veor	q10, q10, q2		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q2, d27, d7		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q2, q2, q8
	veor	q2, q2, q10
	veor	q1,q1,q0		@ Karatsuba post-processing
	veor	q1,q1,q2
	veor	d1,d1,d2
	veor	d4,d4,d3	@ Xh|Xl - 256-bit result

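	@ Reduce the 256-bit product modulo the GHASH polynomial
	@ x^128 + x^7 + x^2 + x + 1 (bit-reflected, hence the 63/62/57-bit shifts).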
	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	q9,q0,#57		@ 1st phase
	vshl.i64	q10,q0,#62
	veor	q10,q10,q9		@
	vshl.i64	q9,q0,#63
	veor	q10, q10, q9		@
	veor	d1,d1,d20	@
	veor	d4,d4,d21

	vshr.u64	q10,q0,#1		@ 2nd phase
	veor	q2,q2,q0
	veor	q0,q0,q10		@
	vshr.u64	q10,q10,#6
	vshr.u64	q0,q0,#1		@
	veor	q0,q0,q2		@
	veor	q0,q0,q10		@

	subs	r3,#16
	bne	Loop_neon

#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	sub	r0,#16
	vst1.64	d1,[r0]!		@ write out Xi
	vst1.64	d0,[r0]

	bx	lr					@ bx lr

#endif
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
    589