/* ====================================================================
 * Copyright (c) 2011-2013 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 */

/* This implementation of Poly1305 is by Andrew Moon
 * (https://github.com/floodyberry/poly1305-donna) and released into the
 * public domain. It implements SIMD vectorization based on the algorithm
 * described in http://cr.yp.to/papers.html#neoncrypto, unrolled to two
 * powers of r, i.e. a 64 byte block size.
 */
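
/* Usage sketch (illustrative only, not part of the library): the streaming
 * interface below is driven as init/update/finish, where key holds the
 * 16-byte r value followed by the 16-byte pad:
 *
 *	unsigned char key[32], mac[16];
 *	poly1305_state ctx;
 *	CRYPTO_poly1305_init(&ctx, key);
 *	CRYPTO_poly1305_update(&ctx, message, message_len);
 *	CRYPTO_poly1305_finish(&ctx, mac);
 */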

#include <emmintrin.h>
#include <stdint.h>
#include <openssl/opensslconf.h>

#if !defined(OPENSSL_NO_POLY1305)

#include <openssl/poly1305.h>

#define ALIGN(x) __attribute__((aligned(x)))
#define INLINE inline
#define U8TO64_LE(m) (*(uint64_t*)(m))
#define U8TO32_LE(m) (*(uint32_t*)(m))
#define U64TO8_LE(m,v) (*(uint64_t*)(m)) = v

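/* These load/store macros assume a little-endian target that allows
 * unaligned word access, which holds wherever this SSE2 path is compiled.
 * A portable byte-wise equivalent of U8TO64_LE, shown for reference only
 * (a sketch; the macros above are what the code actually uses):
 *
 *	static uint64_t u8to64_le_portable(const uint8_t *m)
 *		{
 *		uint64_t r = 0;
 *		int i;
 *		for (i = 0; i < 8; i++)
 *			r |= (uint64_t)m[i] << (8 * i);
 *		return r;
 *		}
 */
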
typedef __m128i xmmi;
typedef unsigned __int128 uint128_t;

static const uint32_t ALIGN(16) poly1305_x64_sse2_message_mask[4] =
	{(1 << 26) - 1, 0, (1 << 26) - 1, 0};
static const uint32_t ALIGN(16) poly1305_x64_sse2_5[4] = {5, 0, 5, 0};
static const uint32_t ALIGN(16) poly1305_x64_sse2_1shl128[4] =
	{(1 << 24), 0, (1 << 24), 0};

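/* Only the low 32 bits of each 64-bit lane are used by _mm_mul_epu32, so
 * the constants occupy words 0 and 2.  poly1305_x64_sse2_1shl128 holds
 * 1 << 24 because the 2^128 padding bit lands in the fifth 26-bit limb:
 * 128 - 4*26 = 24.
 */
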
static uint128_t INLINE
add128(uint128_t a, uint128_t b)
	{
	return a + b;
	}

static uint128_t INLINE
add128_64(uint128_t a, uint64_t b)
	{
	return a + b;
	}

static uint128_t INLINE
mul64x64_128(uint64_t a, uint64_t b)
	{
	return (uint128_t)a * b;
	}

static uint64_t INLINE
lo128(uint128_t a)
	{
	return (uint64_t)a;
	}

static uint64_t INLINE
shr128(uint128_t v, const int shift)
	{
	return (uint64_t)(v >> shift);
	}

static uint64_t INLINE
shr128_pair(uint64_t hi, uint64_t lo, const int shift)
	{
	return (uint64_t)((((uint128_t)hi << 64) | lo) >> shift);
	}

typedef struct poly1305_power_t
	{
	union
		{
		xmmi v;
		uint64_t u[2];
		uint32_t d[4];
		} R20,R21,R22,R23,R24,S21,S22,S23,S24;
	} poly1305_power;

typedef struct poly1305_state_internal_t
	{
	poly1305_power P[2];     /* 288 bytes, top 32 bit halves unused = 144
				    bytes of free storage */
	union
		{
		xmmi H[5];           /*  80 bytes  */
		uint64_t HH[10];
		};
	/* uint64_t r0,r1,r2;       [24 bytes] */
	/* uint64_t pad0,pad1;      [16 bytes] */
	uint64_t started;        /*   8 bytes  */
	uint64_t leftover;       /*   8 bytes  */
	uint8_t buffer[64];      /*  64 bytes  */
	} poly1305_state_internal;   /* 448 bytes total + 63 bytes for
					alignment = 511 bytes raw */

static poly1305_state_internal INLINE
*poly1305_aligned_state(poly1305_state *state)
	{
	return (poly1305_state_internal *)(((uint64_t)state + 63) & ~63);
	}
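
/* Rounding the caller's state pointer up to the next 64-byte boundary:
 * adding 63 and clearing the low six bits maps any address in
 * (64k - 64, 64k] to 64k, e.g. 0x1001 + 63 = 0x1040, & ~63 = 0x1040. */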

/* copy 0-63 bytes */
static void INLINE
poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes)
	{
	size_t offset = src - dst;
	if (bytes & 32)
		{
		_mm_storeu_si128((xmmi *)(dst + 0), _mm_loadu_si128((xmmi *)(dst + offset + 0)));
		_mm_storeu_si128((xmmi *)(dst + 16), _mm_loadu_si128((xmmi *)(dst + offset + 16)));
		dst += 32;
		}
	if (bytes & 16)
		{
		_mm_storeu_si128((xmmi *)dst,
				 _mm_loadu_si128((xmmi *)(dst + offset)));
		dst += 16;
		}
	if (bytes &  8)
		{
		*(uint64_t *)dst = *(uint64_t *)(dst + offset);
		dst += 8;
		}
	if (bytes &  4)
		{
		*(uint32_t *)dst = *(uint32_t *)(dst + offset);
		dst += 4;
		}
	if (bytes &  2)
		{
		*(uint16_t *)dst = *(uint16_t *)(dst + offset);
		dst += 2;
		}
	if (bytes &  1)
		{
		*( uint8_t *)dst = *( uint8_t *)(dst + offset);
		}
	}
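
/* The copy decomposes `bytes` into its bits (32,16,8,4,2,1), so any length
 * in [0,63] takes at most six fixed-size moves, e.g. 44 = 32 + 8 + 4.
 * The branch pattern depends only on the length, never on the data. */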

/* zero 0-15 bytes */
static void INLINE
poly1305_block_zero(uint8_t *dst, size_t bytes)
	{
	if (bytes &  8) { *(uint64_t *)dst = 0; dst += 8; }
	if (bytes &  4) { *(uint32_t *)dst = 0; dst += 4; }
	if (bytes &  2) { *(uint16_t *)dst = 0; dst += 2; }
	if (bytes &  1) { *( uint8_t *)dst = 0; }
	}

static size_t INLINE
poly1305_min(size_t a, size_t b)
	{
	return (a < b) ? a : b;
	}

void
CRYPTO_poly1305_init(poly1305_state *state, const unsigned char key[32])
	{
	poly1305_state_internal *st = poly1305_aligned_state(state);
	poly1305_power *p;
	uint64_t r0,r1,r2;
	uint64_t t0,t1;

	/* clamp key */
	t0 = U8TO64_LE(key + 0);
	t1 = U8TO64_LE(key + 8);
	r0 = t0 & 0xffc0fffffff; t0 >>= 44; t0 |= t1 << 20;
	r1 = t0 & 0xfffffc0ffff; t1 >>= 24;
	r2 = t1 & 0x00ffffffc0f;
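
	/* These three masks are the standard Poly1305 clamp
	 * r &= 0x0ffffffc0ffffffc0ffffffc0fffffff, split at bits 44 and 88
	 * into the limbs r0, r1, r2 (bits 0-43, 44-87 and 88-127 of the
	 * clamped 128-bit r). */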

	/* store r in unused space of st->P[1] */
	p = &st->P[1];
	p->R20.d[1] = (uint32_t)(r0      );
	p->R20.d[3] = (uint32_t)(r0 >> 32);
	p->R21.d[1] = (uint32_t)(r1      );
	p->R21.d[3] = (uint32_t)(r1 >> 32);
	p->R22.d[1] = (uint32_t)(r2      );
	p->R22.d[3] = (uint32_t)(r2 >> 32);

	/* store pad */
	p->R23.d[1] = U8TO32_LE(key + 16);
	p->R23.d[3] = U8TO32_LE(key + 20);
	p->R24.d[1] = U8TO32_LE(key + 24);
	p->R24.d[3] = U8TO32_LE(key + 28);

	/* H = 0 */
	st->H[0] = _mm_setzero_si128();
	st->H[1] = _mm_setzero_si128();
	st->H[2] = _mm_setzero_si128();
	st->H[3] = _mm_setzero_si128();
	st->H[4] = _mm_setzero_si128();

	st->started = 0;
	st->leftover = 0;
	}

static void
poly1305_first_block(poly1305_state_internal *st, const uint8_t *m)
	{
	const xmmi MMASK =
		_mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
	const xmmi FIVE = _mm_load_si128((xmmi*)poly1305_x64_sse2_5);
	const xmmi HIBIT = _mm_load_si128((xmmi*)poly1305_x64_sse2_1shl128);
	xmmi T5,T6;
	poly1305_power *p;
	uint128_t d[3];
	uint64_t r0,r1,r2;
	uint64_t r20,r21,r22,s22;
	uint64_t pad0,pad1;
	uint64_t c;
	uint64_t i;

	/* pull out stored info */
	p = &st->P[1];

	r0   = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
	r1   = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
	r2   = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];
	pad0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1];
	pad1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1];

	/* compute powers r^2,r^4 */
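	/* Squaring mod 2^130 - 5 with r = r20 + r21*2^44 + r22*2^88: any
	 * product term reaching weight 2^132 wraps around as
	 * 2^132 = 4 * 2^130 = 4 * 5 = 20 (mod p), hence s22 = r22 * (5 << 2).
	 * d[0..2] collect the terms of weight 2^0, 2^44 and 2^88. */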
	r20 = r0;
	r21 = r1;
	r22 = r2;
	for (i = 0; i < 2; i++)
		{
		s22 = r22 * (5 << 2);

		d[0] = add128(mul64x64_128(r20, r20), mul64x64_128(r21 * 2, s22));
		d[1] = add128(mul64x64_128(r22, s22), mul64x64_128(r20 * 2, r21));
		d[2] = add128(mul64x64_128(r21, r21), mul64x64_128(r22 * 2, r20));

		                           r20 = lo128(d[0]) & 0xfffffffffff; c = shr128(d[0], 44);
		d[1] = add128_64(d[1], c); r21 = lo128(d[1]) & 0xfffffffffff; c = shr128(d[1], 44);
		d[2] = add128_64(d[2], c); r22 = lo128(d[2]) & 0x3ffffffffff; c = shr128(d[2], 42);
		r20 += c * 5; c = (r20 >> 44); r20 = r20 & 0xfffffffffff;
		r21 += c;

		p->R20.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)( r20                     ) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
		p->R21.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r20 >> 26) | (r21 << 18)) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
		p->R22.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 8)               ) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
		p->R23.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 34) | (r22 << 10)) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
		p->R24.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r22 >> 16)              )            ), _MM_SHUFFLE(1,0,1,0));
		p->S21.v = _mm_mul_epu32(p->R21.v, FIVE);
		p->S22.v = _mm_mul_epu32(p->R22.v, FIVE);
		p->S23.v = _mm_mul_epu32(p->R23.v, FIVE);
		p->S24.v = _mm_mul_epu32(p->R24.v, FIVE);
		p--;
		}
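
	/* Each pass converts the current power to radix 2^26 (five limbs,
	 * duplicated into both lanes by the shuffle) and squares again, so
	 * P[1] ends up holding r^2 and P[0] r^4.  S2x = R2x * 5 caches the
	 * factors needed when limb products wrap past 2^130. */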

	/* put saved info back */
	p = &st->P[1];
	p->R20.d[1] = (uint32_t)(r0        );
	p->R20.d[3] = (uint32_t)(r0   >> 32);
	p->R21.d[1] = (uint32_t)(r1        );
	p->R21.d[3] = (uint32_t)(r1   >> 32);
	p->R22.d[1] = (uint32_t)(r2        );
	p->R22.d[3] = (uint32_t)(r2   >> 32);
	p->R23.d[1] = (uint32_t)(pad0      );
	p->R23.d[3] = (uint32_t)(pad0 >> 32);
	p->R24.d[1] = (uint32_t)(pad1      );
	p->R24.d[3] = (uint32_t)(pad1 >> 32);

	/* H = [Mx,My] */
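	/* Lane 0 takes message block x (bytes 0-15) and lane 1 block y
	 * (bytes 16-31), each split into five 26-bit limbs; HIBIT sets the
	 * 2^128 padding bit in the top limb. */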
	T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), _mm_loadl_epi64((xmmi *)(m + 16)));
	T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), _mm_loadl_epi64((xmmi *)(m + 24)));
	st->H[0] = _mm_and_si128(MMASK, T5);
	st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
	T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
	st->H[2] = _mm_and_si128(MMASK, T5);
	st->H[3] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
	st->H[4] = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
	}

static void
poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, size_t bytes)
	{
	const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
	const xmmi FIVE = _mm_load_si128((xmmi*)poly1305_x64_sse2_5);
	const xmmi HIBIT = _mm_load_si128((xmmi*)poly1305_x64_sse2_1shl128);

	poly1305_power *p;
	xmmi H0,H1,H2,H3,H4;
	xmmi T0,T1,T2,T3,T4,T5,T6;
	xmmi M0,M1,M2,M3,M4;
	xmmi C1,C2;

	H0 = st->H[0];
	H1 = st->H[1];
	H2 = st->H[2];
	H3 = st->H[3];
	H4 = st->H[4];

	while (bytes >= 64)
		{
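		/* Each 64-byte iteration advances both lanes four blocks:
		 * H = H*[r^4,r^4] + [m0,m1]*[r^2,r^2] + [m2,m3], with m0..m3
		 * the four 16-byte blocks.  The final multiply by [r^2,r] in
		 * poly1305_combine folds the lanes into the serial result. */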
		/* H *= [r^4,r^4] */
		p = &st->P[0];
		T0 = _mm_mul_epu32(H0, p->R20.v);
		T1 = _mm_mul_epu32(H0, p->R21.v);
		T2 = _mm_mul_epu32(H0, p->R22.v);
		T3 = _mm_mul_epu32(H0, p->R23.v);
		T4 = _mm_mul_epu32(H0, p->R24.v);
		T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(H1, p->R23.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(H2, p->R22.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(H3, p->R21.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(H4, p->R20.v);                                   T4 = _mm_add_epi64(T4, T5);

		/* H += [Mx,My]*[r^2,r^2] */
		T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), _mm_loadl_epi64((xmmi *)(m + 16)));
		T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), _mm_loadl_epi64((xmmi *)(m + 24)));
		M0 = _mm_and_si128(MMASK, T5);
		M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
		T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
		M2 = _mm_and_si128(MMASK, T5);
		M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
		M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

		p = &st->P[1];
		T5 = _mm_mul_epu32(M0, p->R20.v); T6 = _mm_mul_epu32(M0, p->R21.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(M1, p->S24.v); T6 = _mm_mul_epu32(M1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(M2, p->S23.v); T6 = _mm_mul_epu32(M2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(M3, p->S22.v); T6 = _mm_mul_epu32(M3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(M4, p->S21.v); T6 = _mm_mul_epu32(M4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(M0, p->R22.v); T6 = _mm_mul_epu32(M0, p->R23.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(M1, p->R21.v); T6 = _mm_mul_epu32(M1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(M2, p->R20.v); T6 = _mm_mul_epu32(M2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(M3, p->S24.v); T6 = _mm_mul_epu32(M3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(M4, p->S23.v); T6 = _mm_mul_epu32(M4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(M0, p->R24.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(M1, p->R23.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(M2, p->R22.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(M3, p->R21.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(M4, p->R20.v);                                   T4 = _mm_add_epi64(T4, T5);

		/* H += [Mx,My] */
		T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 32)), _mm_loadl_epi64((xmmi *)(m + 48)));
		T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 40)), _mm_loadl_epi64((xmmi *)(m + 56)));
		M0 = _mm_and_si128(MMASK, T5);
		M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
		T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
		M2 = _mm_and_si128(MMASK, T5);
		M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
		M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

		T0 = _mm_add_epi64(T0, M0);
		T1 = _mm_add_epi64(T1, M1);
		T2 = _mm_add_epi64(T2, M2);
		T3 = _mm_add_epi64(T3, M3);
		T4 = _mm_add_epi64(T4, M4);

		/* reduce */
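		/* Lazy carry propagation in radix 2^26: carries out of the
		 * top limb re-enter limb 0 multiplied by 5, since
		 * 2^130 = 5 (mod p).  Two interleaved carry chains
		 * (0->1->2 and 3->4->0) keep all limbs bounded. */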
		C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK); T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
		C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26); T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK); T2 = _mm_add_epi64(T2, C1); T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
		C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26); T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
		C1 = _mm_srli_epi64(T3, 26);                              T3 = _mm_and_si128(T3, MMASK);                                T4 = _mm_add_epi64(T4, C1);

		/* H = (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My]) */
		H0 = T0;
		H1 = T1;
		H2 = T2;
		H3 = T3;
		H4 = T4;

		m += 64;
		bytes -= 64;
		}

	st->H[0] = H0;
	st->H[1] = H1;
	st->H[2] = H2;
	st->H[3] = H3;
	st->H[4] = H4;
	}

static size_t
poly1305_combine(poly1305_state_internal *st, const uint8_t *m, size_t bytes)
	{
	const xmmi MMASK =
		_mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
	const xmmi HIBIT = _mm_load_si128((xmmi*)poly1305_x64_sse2_1shl128);
	const xmmi FIVE = _mm_load_si128((xmmi*)poly1305_x64_sse2_5);

	poly1305_power *p;
	xmmi H0,H1,H2,H3,H4;
	xmmi M0,M1,M2,M3,M4;
	xmmi T0,T1,T2,T3,T4,T5,T6;
	xmmi C1,C2;

	uint64_t r0,r1,r2;
	uint64_t t0,t1,t2,t3,t4;
	uint64_t c;
	size_t consumed = 0;

	H0 = st->H[0];
	H1 = st->H[1];
	H2 = st->H[2];
	H3 = st->H[3];
	H4 = st->H[4];

	/* p = [r^2,r^2] */
	p = &st->P[1];

	if (bytes >= 32)
		{
		/* H *= [r^2,r^2] */
		T0 = _mm_mul_epu32(H0, p->R20.v);
		T1 = _mm_mul_epu32(H0, p->R21.v);
		T2 = _mm_mul_epu32(H0, p->R22.v);
		T3 = _mm_mul_epu32(H0, p->R23.v);
		T4 = _mm_mul_epu32(H0, p->R24.v);
		T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
		T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
		T5 = _mm_mul_epu32(H1, p->R23.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(H2, p->R22.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(H3, p->R21.v);                                   T4 = _mm_add_epi64(T4, T5);
		T5 = _mm_mul_epu32(H4, p->R20.v);                                   T4 = _mm_add_epi64(T4, T5);

		/* H += [Mx,My] */
		T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), _mm_loadl_epi64((xmmi *)(m + 16)));
		T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), _mm_loadl_epi64((xmmi *)(m + 24)));
		M0 = _mm_and_si128(MMASK, T5);
		M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
		T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
		M2 = _mm_and_si128(MMASK, T5);
		M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
		M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

		T0 = _mm_add_epi64(T0, M0);
		T1 = _mm_add_epi64(T1, M1);
		T2 = _mm_add_epi64(T2, M2);
		T3 = _mm_add_epi64(T3, M3);
		T4 = _mm_add_epi64(T4, M4);

		/* reduce */
		C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK); T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
		C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26); T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK); T2 = _mm_add_epi64(T2, C1); T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
		C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26); T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
		C1 = _mm_srli_epi64(T3, 26);                              T3 = _mm_and_si128(T3, MMASK);                                T4 = _mm_add_epi64(T4, C1);

		/* H = (H*[r^2,r^2] + [Mx,My]) */
		H0 = T0;
		H1 = T1;
		H2 = T2;
		H3 = T3;
		H4 = T4;

		consumed = 32;
		}

	/* finalize, H *= [r^2,r] */
	r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
	r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
	r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];

	p->R20.d[2] = (uint32_t)( r0                    ) & 0x3ffffff;
	p->R21.d[2] = (uint32_t)((r0 >> 26) | (r1 << 18)) & 0x3ffffff;
	p->R22.d[2] = (uint32_t)((r1 >> 8)              ) & 0x3ffffff;
	p->R23.d[2] = (uint32_t)((r1 >> 34) | (r2 << 10)) & 0x3ffffff;
	p->R24.d[2] = (uint32_t)((r2 >> 16)             )            ;
	p->S21.d[2] = p->R21.d[2] * 5;
	p->S22.d[2] = p->R22.d[2] * 5;
	p->S23.d[2] = p->R23.d[2] * 5;
	p->S24.d[2] = p->R24.d[2] * 5;
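
	/* d[2] is the low 32-bit word of lane 1, so after these stores each
	 * R2x vector carries a limb of r^2 in lane 0 and of r in lane 1,
	 * forming the [r^2,r] multiplier that folds the two lanes together. */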

	/* H *= [r^2,r] */
	T0 = _mm_mul_epu32(H0, p->R20.v);
	T1 = _mm_mul_epu32(H0, p->R21.v);
	T2 = _mm_mul_epu32(H0, p->R22.v);
	T3 = _mm_mul_epu32(H0, p->R23.v);
	T4 = _mm_mul_epu32(H0, p->R24.v);
	T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
	T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
	T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
	T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
	T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
	T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
	T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
	T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
	T5 = _mm_mul_epu32(H1, p->R23.v);                                   T4 = _mm_add_epi64(T4, T5);
	T5 = _mm_mul_epu32(H2, p->R22.v);                                   T4 = _mm_add_epi64(T4, T5);
	T5 = _mm_mul_epu32(H3, p->R21.v);                                   T4 = _mm_add_epi64(T4, T5);
	T5 = _mm_mul_epu32(H4, p->R20.v);                                   T4 = _mm_add_epi64(T4, T5);

	C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK); T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
	C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26); T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK); T2 = _mm_add_epi64(T2, C1); T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
	C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26); T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
	C1 = _mm_srli_epi64(T3, 26);                              T3 = _mm_and_si128(T3, MMASK);                                T4 = _mm_add_epi64(T4, C1);

	/* H = H[0]+H[1] */
	H0 = _mm_add_epi64(T0, _mm_srli_si128(T0, 8));
	H1 = _mm_add_epi64(T1, _mm_srli_si128(T1, 8));
	H2 = _mm_add_epi64(T2, _mm_srli_si128(T2, 8));
	H3 = _mm_add_epi64(T3, _mm_srli_si128(T3, 8));
	H4 = _mm_add_epi64(T4, _mm_srli_si128(T4, 8));

	t0 = _mm_cvtsi128_si32(H0)    ; c = (t0 >> 26); t0 &= 0x3ffffff;
	t1 = _mm_cvtsi128_si32(H1) + c; c = (t1 >> 26); t1 &= 0x3ffffff;
	t2 = _mm_cvtsi128_si32(H2) + c; c = (t2 >> 26); t2 &= 0x3ffffff;
	t3 = _mm_cvtsi128_si32(H3) + c; c = (t3 >> 26); t3 &= 0x3ffffff;
	t4 = _mm_cvtsi128_si32(H4) + c; c = (t4 >> 26); t4 &= 0x3ffffff;
	t0 =              t0 + (c * 5); c = (t0 >> 26); t0 &= 0x3ffffff;
	t1 =              t1 + c;

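	/* Repack the five fully carried 26-bit limbs t0..t4 into the three
	 * 44/44/42-bit limbs used by the scalar finish code. */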
	st->HH[0] =  ((t0      ) | (t1 << 26)             ) & 0xfffffffffffull;
	st->HH[1] =  ((t1 >> 18) | (t2 <<  8) | (t3 << 34)) & 0xfffffffffffull;
	st->HH[2] =  ((t3 >> 10) | (t4 << 16)             ) & 0x3ffffffffffull;

	return consumed;
	}

void
CRYPTO_poly1305_update(poly1305_state *state, const unsigned char *m,
		       size_t bytes)
	{
	poly1305_state_internal *st = poly1305_aligned_state(state);
	size_t want;

	/* need at least 32 initial bytes to start the accelerated branch */
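	/* The vector pipeline works on one 16-byte block per lane, so
	 * nothing is vectorized until 32 bytes are available; shorter
	 * inputs stay buffered and are handled by the scalar path in
	 * CRYPTO_poly1305_finish. */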
	if (!st->started)
		{
		if ((st->leftover == 0) && (bytes > 32))
			{
			poly1305_first_block(st, m);
			m += 32;
			bytes -= 32;
			}
		else
			{
			want = poly1305_min(32 - st->leftover, bytes);
			poly1305_block_copy(st->buffer + st->leftover, m, want);
			bytes -= want;
			m += want;
			st->leftover += want;
			if ((st->leftover < 32) || (bytes == 0))
				return;
			poly1305_first_block(st, st->buffer);
			st->leftover = 0;
			}
		st->started = 1;
		}

	/* handle leftover */
	if (st->leftover)
		{
		want = poly1305_min(64 - st->leftover, bytes);
		poly1305_block_copy(st->buffer + st->leftover, m, want);
		bytes -= want;
		m += want;
		st->leftover += want;
		if (st->leftover < 64)
			return;
		poly1305_blocks(st, st->buffer, 64);
		st->leftover = 0;
		}

	/* process 64 byte blocks */
	if (bytes >= 64)
		{
		want = (bytes & ~63);
		poly1305_blocks(st, m, want);
		m += want;
		bytes -= want;
		}

	if (bytes)
		{
		poly1305_block_copy(st->buffer + st->leftover, m, bytes);
		st->leftover += bytes;
		}
	}

void
CRYPTO_poly1305_finish(poly1305_state *state, unsigned char mac[16])
	{
	poly1305_state_internal *st = poly1305_aligned_state(state);
	size_t leftover = st->leftover;
	uint8_t *m = st->buffer;
	uint128_t d[3];
	uint64_t h0,h1,h2;
	uint64_t t0,t1;
	uint64_t g0,g1,g2,c,nc;
	uint64_t r0,r1,r2,s1,s2;
	poly1305_power *p;

	if (st->started)
		{
		size_t consumed = poly1305_combine(st, m, leftover);
		leftover -= consumed;
		m += consumed;
		}

	/* st->HH will either be 0 or have the combined result */
	h0 = st->HH[0];
	h1 = st->HH[1];
	h2 = st->HH[2];

	p = &st->P[1];
	r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
	r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
	r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];
	s1 = r1 * (5 << 2);
	s2 = r2 * (5 << 2);

	if (leftover < 16)
		goto poly1305_donna_atmost15bytes;

poly1305_donna_atleast16bytes:
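	/* load one 16-byte block into 44/44/42-bit limbs, with the 2^128
	 * padding bit added to the top limb (88 + 40 = 128) */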
	t0 = U8TO64_LE(m + 0);
	t1 = U8TO64_LE(m + 8);
	h0 += t0 & 0xfffffffffff;
	t0 = shr128_pair(t1, t0, 44);
	h1 += t0 & 0xfffffffffff;
	h2 += (t1 >> 24) | ((uint64_t)1 << 40);

poly1305_donna_mul:
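	/* h = (h * r) mod 2^130 - 5: products whose weight reaches 2^132
	 * wrap around as 2^132 = 4*5 = 20 (mod p), hence the cached
	 * s1 = r1 * (5 << 2) and s2 = r2 * (5 << 2) above */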
	d[0] = add128(add128(mul64x64_128(h0, r0), mul64x64_128(h1, s2)), mul64x64_128(h2, s1));
	d[1] = add128(add128(mul64x64_128(h0, r1), mul64x64_128(h1, r0)), mul64x64_128(h2, s2));
	d[2] = add128(add128(mul64x64_128(h0, r2), mul64x64_128(h1, r1)), mul64x64_128(h2, r0));
	                           h0 = lo128(d[0]) & 0xfffffffffff; c = shr128(d[0], 44);
	d[1] = add128_64(d[1], c); h1 = lo128(d[1]) & 0xfffffffffff; c = shr128(d[1], 44);
	d[2] = add128_64(d[2], c); h2 = lo128(d[2]) & 0x3ffffffffff; c = shr128(d[2], 42);
	h0   += c * 5;

	m += 16;
	leftover -= 16;
	if (leftover >= 16) goto poly1305_donna_atleast16bytes;

	/* final bytes */
poly1305_donna_atmost15bytes:
	if (!leftover) goto poly1305_donna_finish;

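	/* pad the final partial block per the spec: append 0x01 and
	 * zero-fill to 16 bytes; the 2^128 bit is not set for a short
	 * block because the appended 0x01 serves as the terminator */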
	m[leftover++] = 1;
	poly1305_block_zero(m + leftover, 16 - leftover);
	leftover = 16;

	t0 = U8TO64_LE(m+0);
	t1 = U8TO64_LE(m+8);
	h0 += t0 & 0xfffffffffff; t0 = shr128_pair(t1, t0, 44);
	h1 += t0 & 0xfffffffffff;
	h2 += (t1 >> 24);

	goto poly1305_donna_mul;

poly1305_donna_finish:
	             c = (h0 >> 44); h0 &= 0xfffffffffff;
	h1 += c;     c = (h1 >> 44); h1 &= 0xfffffffffff;
	h2 += c;     c = (h2 >> 42); h2 &= 0x3ffffffffff;
	h0 += c * 5;

	g0 = h0 + 5; c = (g0 >> 44); g0 &= 0xfffffffffff;
	g1 = h1 + c; c = (g1 >> 44); g1 &= 0xfffffffffff;
	g2 = h2 + c - ((uint64_t)1 << 42);

	c = (g2 >> 63) - 1;
	nc = ~c;
	h0 = (h0 & nc) | (g0 & c);
	h1 = (h1 & nc) | (g1 & c);
	h2 = (h2 & nc) | (g2 & c);
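
	/* constant-time select of the fully reduced value: g = h + 5 - 2^130.
	 * If h >= 2^130 - 5 the subtraction leaves g2's sign bit clear and
	 * (g2 >> 63) - 1 is all ones, picking g; otherwise the mask is zero
	 * and h is kept. */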

	/* pad */
	t0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1];
	t1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1];
	h0 += (t0 & 0xfffffffffff)    ; c = (h0 >> 44); h0 &= 0xfffffffffff; t0 = shr128_pair(t1, t0, 44);
	h1 += (t0 & 0xfffffffffff) + c; c = (h1 >> 44); h1 &= 0xfffffffffff; t1 = (t1 >> 24);
	h2 += (t1                ) + c;
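	/* tag = (h + pad) mod 2^128: the pad (key bytes 16-31) is added in
	 * the same 44-bit radix and any carry past 2^128 is dropped by the
	 * 128-bit serialization below */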

	U64TO8_LE(mac + 0, ((h0      ) | (h1 << 44)));
	U64TO8_LE(mac + 8, ((h1 >> 20) | (h2 << 24)));
	}

#endif  /* !OPENSSL_NO_POLY1305 */