#if defined(__arm__)
#include "arm_arch.h"

.syntax unified

.text
.code	32

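@ rem_4bit is the reduction table for the 4-bit (Shoup) method: entry i
@ is the XOR of 0xE1<<(5+b) over the set bits b of i, i.e. the 16-bit
@ constant folded into the top of Xi whenever a nibble is shifted off
@ its low end (annotation; see the ldrh/lsl#16 pairs below).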
.type	rem_4bit,%object
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit

.type	rem_4bit_get,%function
rem_4bit_get:
	sub	r2,pc,#8
	sub	r2,r2,#32	@ &rem_4bit
	b	.Lrem_4bit_got
	nop
.size	rem_4bit_get,.-rem_4bit_get

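@ Annotation (assumed prototype, per the usual OpenSSL GHASH interface):
@   void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
@                       const u8 *inp, size_t len);
@ r0=Xi, r1=Htable, r2=inp, r3=len (a multiple of 16).  rem_4bit is
@ copied to the stack so .Linner can index it off sp; each 16-byte block
@ of inp is XORed into Xi and multiplied by H via the 4-bit tables.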
.global	gcm_ghash_4bit
.hidden	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
gcm_ghash_4bit:
	sub	r12,pc,#8
	add	r3,r2,r3		@ r3 to point at the end
	stmdb	sp!,{r3-r11,lr}		@ save r3/end too
	sub	r12,r12,#48		@ &rem_4bit

	ldmia	r12,{r4-r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4-r11}		@ ... to stack

	ldrb	r12,[r2,#15]
	ldrb	r14,[r0,#15]
.Louter:
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4-r7}	@ load Htbl[nlo]
	add	r11,r1,r14
	ldrb	r12,[r2,#14]

	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8-r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[sp,r14]		@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	ldrb	r14,[r0,#14]
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16

.Linner:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8-r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[sp,r12]		@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
	ldrbpl	r12,[r2,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8-r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
	ldrbpl	r8,[r0,r3]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r9,[sp,r14]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eorpl	r12,r12,r8
	eor	r7,r11,r7,lsr#4
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r9,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Linner

	ldr	r3,[sp,#32]		@ re-load r3/end
	add	r2,r2,#16
	mov	r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif
	cmp	r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif
	ldrbne	r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

	bne	.Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_ghash_4bit,.-gcm_ghash_4bit

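@ Annotation (assumed prototype, per the usual OpenSSL GHASH interface):
@   void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
@ Multiplies Xi in place by H; rem_4bit_get leaves &rem_4bit in r2, so
@ the reduction table is read directly rather than from a stack copy.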
.global	gcm_gmult_4bit
.hidden	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
gcm_gmult_4bit:
	stmdb	sp!,{r4-r11,lr}
	ldrb	r12,[r0,#15]
	b	rem_4bit_get
.Lrem_4bit_got:
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4-r7}	@ load Htbl[nlo]
	ldrb	r12,[r0,#14]

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8-r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	and	r14,r12,#0xf0
	eor	r7,r7,r8,lsl#16
	and	r12,r12,#0x0f

.Loop:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8-r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[r2,r12]	@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
	ldrbpl	r12,[r0,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8-r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
#if __ARM_ARCH__>=7
.fpu	neon

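@ Annotation (assumed prototype, matching the callers in gcm128.c):
@   void gcm_init_neon(u128 Htable[16], const u64 H[2]);
@ Only the first 16 bytes of Htable are written: H is shifted left by
@ one bit and conditionally XORed with the 0xc2...01 reduction constant,
@ producing the "twisted H" consumed by gcm_gmult_neon/gcm_ghash_neon.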
.global	gcm_init_neon
.hidden	gcm_init_neon
.type	gcm_init_neon,%function
.align	4
gcm_init_neon:
	vld1.64		d7,[r1,:64]!	@ load H
	vmov.i8		q8,#0xe1
	vld1.64		d6,[r1,:64]
	vshl.i64	d17,#57
	vshr.u64	d16,#63		@ t0=0xc2....01
	vdup.8		q9,d7[7]
	vshr.u64	d26,d6,#63
	vshr.s8		q9,#7			@ broadcast carry bit
	vshl.i64	q3,q3,#1
	vand		q8,q8,q9
	vorr		d7,d26		@ H<<<=1
	veor		q3,q3,q8		@ twisted H
	vstmia		r0,{q3}

	.word	0xe12fff1e
.size	gcm_init_neon,.-gcm_init_neon

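@ Annotation (assumed prototype):
@   void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
@ One GHASH multiplication: Xi is loaded, r3 is set to 16 and the code
@ branches into the shared .Lgmult_neon body, so .Loop_neon runs
@ exactly once.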
.global	gcm_gmult_neon
.hidden	gcm_gmult_neon
.type	gcm_gmult_neon,%function
.align	4
gcm_gmult_neon:
	vld1.64		d7,[r0,:64]!	@ load Xi
	vld1.64		d6,[r0,:64]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia		r1,{d26-d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor		d28,d26,d27		@ Karatsuba pre-processing
	mov		r3,#16
	b		.Lgmult_neon
.size	gcm_gmult_neon,.-gcm_gmult_neon

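@ Annotation (assumed prototype):
@   void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16],
@                       const u8 *inp, size_t len);
@ Hashes len bytes (a multiple of 16) of inp into Xi using the twisted
@ H prepared by gcm_init_neon; each block is XORed into Xi and then
@ multiplied by H in .Lgmult_neon.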
.global	gcm_ghash_neon
.hidden	gcm_ghash_neon
.type	gcm_ghash_neon,%function
.align	4
gcm_ghash_neon:
	vld1.64		d1,[r0,:64]!	@ load Xi
	vld1.64		d0,[r0,:64]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia		r1,{d26-d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor		d28,d26,d27		@ Karatsuba pre-processing

.Loop_neon:
	vld1.64		d7,[r2]!		@ load inp
	vld1.64		d6,[r2]!
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	veor		q3,q0			@ inp^=Xi
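@ Annotation: the three blocks below form a Karatsuba product of Xi and
@ the twisted H.  Each block is a 64x64->128-bit carry-less multiply
@ built from 8x8-bit vmull.p8 products, with the misaligned partial
@ products rotated into place by vext.8 and trimmed by the d29-d31
@ masks; the 256-bit result is then reduced in two shift/XOR phases.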
.Lgmult_neon:
	vext.8		d16, d26, d26, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8		d0, d6, d6, #1	@ B1
	vmull.p8	q0, d26, d0		@ E = A*B1
	vext.8		d18, d26, d26, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8		d22, d6, d6, #2	@ B2
	vmull.p8	q11, d26, d22		@ G = A*B2
	vext.8		d20, d26, d26, #3	@ A3
	veor		q8, q8, q0		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8		d0, d6, d6, #3	@ B3
	veor		q9, q9, q11		@ M = G + H
	vmull.p8	q0, d26, d0		@ I = A*B3
	veor		d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand		d17, d17, d29
	vext.8		d22, d6, d6, #4	@ B4
	veor		d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand		d19, d19, d30
	vmull.p8	q11, d26, d22		@ K = A*B4
	veor		q10, q10, q0		@ N = I + J
	veor		d16, d16, d17
	veor		d18, d18, d19
	veor		d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand		d21, d21, d31
	vext.8		q8, q8, q8, #15
	veor		d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8		q9, q9, q9, #14
	veor		d20, d20, d21
	vmull.p8	q0, d26, d6		@ D = A*B
	vext.8		q11, q11, q11, #12
	vext.8		q10, q10, q10, #13
	veor		q8, q8, q9
	veor		q10, q10, q11
	veor		q0, q0, q8
	veor		q0, q0, q10
	veor		d6,d6,d7	@ Karatsuba pre-processing
	vext.8		d16, d28, d28, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8		d2, d6, d6, #1	@ B1
	vmull.p8	q1, d28, d2		@ E = A*B1
	vext.8		d18, d28, d28, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8		d22, d6, d6, #2	@ B2
	vmull.p8	q11, d28, d22		@ G = A*B2
	vext.8		d20, d28, d28, #3	@ A3
	veor		q8, q8, q1		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8		d2, d6, d6, #3	@ B3
	veor		q9, q9, q11		@ M = G + H
	vmull.p8	q1, d28, d2		@ I = A*B3
	veor		d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand		d17, d17, d29
	vext.8		d22, d6, d6, #4	@ B4
	veor		d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand		d19, d19, d30
	vmull.p8	q11, d28, d22		@ K = A*B4
	veor		q10, q10, q1		@ N = I + J
	veor		d16, d16, d17
	veor		d18, d18, d19
	veor		d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand		d21, d21, d31
	vext.8		q8, q8, q8, #15
	veor		d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8		q9, q9, q9, #14
	veor		d20, d20, d21
	vmull.p8	q1, d28, d6		@ D = A*B
	vext.8		q11, q11, q11, #12
	vext.8		q10, q10, q10, #13
	veor		q8, q8, q9
	veor		q10, q10, q11
	veor		q1, q1, q8
	veor		q1, q1, q10
	vext.8		d16, d27, d27, #1	@ A1
	vmull.p8	q8, d16, d7		@ F = A1*B
	vext.8		d4, d7, d7, #1	@ B1
	vmull.p8	q2, d27, d4		@ E = A*B1
	vext.8		d18, d27, d27, #2	@ A2
	vmull.p8	q9, d18, d7		@ H = A2*B
	vext.8		d22, d7, d7, #2	@ B2
	vmull.p8	q11, d27, d22		@ G = A*B2
	vext.8		d20, d27, d27, #3	@ A3
	veor		q8, q8, q2		@ L = E + F
	vmull.p8	q10, d20, d7		@ J = A3*B
	vext.8		d4, d7, d7, #3	@ B3
	veor		q9, q9, q11		@ M = G + H
	vmull.p8	q2, d27, d4		@ I = A*B3
	veor		d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand		d17, d17, d29
	vext.8		d22, d7, d7, #4	@ B4
	veor		d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand		d19, d19, d30
	vmull.p8	q11, d27, d22		@ K = A*B4
	veor		q10, q10, q2		@ N = I + J
	veor		d16, d16, d17
	veor		d18, d18, d19
	veor		d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand		d21, d21, d31
	vext.8		q8, q8, q8, #15
	veor		d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8		q9, q9, q9, #14
	veor		d20, d20, d21
	vmull.p8	q2, d27, d7		@ D = A*B
	vext.8		q11, q11, q11, #12
	vext.8		q10, q10, q10, #13
	veor		q8, q8, q9
	veor		q10, q10, q11
	veor		q2, q2, q8
	veor		q2, q2, q10
	veor		q1,q1,q0		@ Karatsuba post-processing
	veor		q1,q1,q2
	veor		d1,d1,d2
	veor		d4,d4,d3	@ Xh|Xl - 256-bit result

	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	q9,q0,#57		@ 1st phase
	vshl.i64	q10,q0,#62
	veor		q10,q10,q9		@
	vshl.i64	q9,q0,#63
	veor		q10, q10, q9		@
	veor		d1,d1,d20	@
	veor		d4,d4,d21

	vshr.u64	q10,q0,#1		@ 2nd phase
	veor		q2,q2,q0
	veor		q0,q0,q10		@
	vshr.u64	q10,q10,#6
	vshr.u64	q0,q0,#1		@
	veor		q0,q0,q2		@
	veor		q0,q0,q10		@

	subs		r3,#16
	bne		.Loop_neon

#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	sub		r0,#16
	vst1.64		d1,[r0,:64]!	@ write out Xi
	vst1.64		d0,[r0,:64]

	.word	0xe12fff1e
.size	gcm_ghash_neon,.-gcm_ghash_neon
#endif
.asciz  "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.align  2

#endif