/* Copyright (c) 2014, Google Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

/* ====================================================================
 *
 * When updating this file, also update chacha_vec_arm.S
 *
 * ==================================================================== */


/* This implementation is by Ted Krovetz and was submitted to SUPERCOP and
 * marked as public domain. It has been altered to allow for non-aligned inputs
 * and to allow the block counter to be passed in specifically. */

#include <openssl/chacha.h>

#if defined(ASM_GEN) ||          \
    !defined(OPENSSL_WINDOWS) && \
        (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) && defined(__SSE2__)

#define CHACHA_RNDS 20 /* 8 (high speed), 20 (conservative), 12 (middle) */

/* Architecture-neutral way to specify 16-byte vector of ints */
typedef unsigned vec __attribute__((vector_size(16)));

/* This implementation is designed for Neon, SSE and AltiVec machines. The
 * following specify how to do certain vector operations efficiently on
 * each architecture, using intrinsics.
 * This implementation supports parallel processing of multiple blocks,
 * including potentially using general-purpose registers. */
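/* Illustrative note: each ROTWn(x) macro below computes a 32-bit rotate-left
 * by n of every lane of x on its target architecture. A minimal generic
 * sketch of the same operation, using the GCC/clang vector-extension shifts
 * that the clang NEON path below also relies on (the ROTW_GENERIC name is
 * hypothetical and is not used anywhere in this file): */
#if 0
#define ROTW_GENERIC(x, n)                          \
  (vec)(((x) << ((vec){(n), (n), (n), (n)})) ^      \
        ((x) >> ((vec){32 - (n), 32 - (n), 32 - (n), 32 - (n)})))
#endif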
#if __ARM_NEON__
#include <string.h>
#include <arm_neon.h>
#define GPR_TOO 1
#define VBPI 2
#define ONE (vec) vsetq_lane_u32(1, vdupq_n_u32(0), 0)
#define LOAD_ALIGNED(m) (vec)(*((vec *)(m)))
#define LOAD(m) ({                    \
    memcpy(alignment_buffer, m, 16);  \
    LOAD_ALIGNED(alignment_buffer);   \
  })
#define STORE(m, r) ({                     \
    (*((vec *)(alignment_buffer))) = (r);  \
    memcpy(m, alignment_buffer, 16);       \
  })
#define ROTV1(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 1)
#define ROTV2(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 2)
#define ROTV3(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 3)
#define ROTW16(x) (vec) vrev32q_u16((uint16x8_t)x)
#if __clang__
#define ROTW7(x) (x << ((vec) {7, 7, 7, 7})) ^ (x >> ((vec) {25, 25, 25, 25}))
#define ROTW8(x) (x << ((vec) {8, 8, 8, 8})) ^ (x >> ((vec) {24, 24, 24, 24}))
#define ROTW12(x) \
  (x << ((vec) {12, 12, 12, 12})) ^ (x >> ((vec) {20, 20, 20, 20}))
#else
#define ROTW7(x) \
  (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 7), (uint32x4_t)x, 25)
#define ROTW8(x) \
  (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 8), (uint32x4_t)x, 24)
#define ROTW12(x) \
  (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 12), (uint32x4_t)x, 20)
#endif
#elif __SSE2__
#include <emmintrin.h>
#define GPR_TOO 0
#if __clang__
#define VBPI 4
#else
#define VBPI 3
#endif
#define ONE (vec) _mm_set_epi32(0, 0, 0, 1)
#define LOAD(m) (vec) _mm_loadu_si128((__m128i *)(m))
#define LOAD_ALIGNED(m) (vec) _mm_load_si128((__m128i *)(m))
#define STORE(m, r) _mm_storeu_si128((__m128i *)(m), (__m128i)(r))
#define ROTV1(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(0, 3, 2, 1))
#define ROTV2(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(1, 0, 3, 2))
#define ROTV3(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(2, 1, 0, 3))
#define ROTW7(x) \
  (vec)(_mm_slli_epi32((__m128i)x, 7) ^ _mm_srli_epi32((__m128i)x, 25))
#define ROTW12(x) \
  (vec)(_mm_slli_epi32((__m128i)x, 12) ^ _mm_srli_epi32((__m128i)x, 20))
#if __SSSE3__
#include <tmmintrin.h>
#define ROTW8(x)                                                             \
  (vec) _mm_shuffle_epi8((__m128i)x, _mm_set_epi8(14, 13, 12, 15, 10, 9, 8, \
                                                  11, 6, 5, 4, 7, 2, 1, 0, 3))
#define ROTW16(x)                                                            \
  (vec) _mm_shuffle_epi8((__m128i)x, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, \
                                                  10, 5, 4, 7, 6, 1, 0, 3, 2))
#else
#define ROTW8(x) \
  (vec)(_mm_slli_epi32((__m128i)x, 8) ^ _mm_srli_epi32((__m128i)x, 24))
#define ROTW16(x) \
  (vec)(_mm_slli_epi32((__m128i)x, 16) ^ _mm_srli_epi32((__m128i)x, 16))
#endif
#else
#error -- Implementation supports only machines with NEON or SSE2
#endif

#ifndef REVV_BE
#define REVV_BE(x) (x)
#endif

#ifndef REVW_BE
#define REVW_BE(x) (x)
#endif

#define BPI (VBPI + GPR_TOO) /* Blocks computed per loop iteration */

#define DQROUND_VECTORS(a,b,c,d)              \
  a += b; d ^= a; d = ROTW16(d);              \
  c += d; b ^= c; b = ROTW12(b);              \
  a += b; d ^= a; d = ROTW8(d);               \
  c += d; b ^= c; b = ROTW7(b);               \
  b = ROTV1(b); c = ROTV2(c); d = ROTV3(d);   \
  a += b; d ^= a; d = ROTW16(d);              \
  c += d; b ^= c; b = ROTW12(b);              \
  a += b; d ^= a; d = ROTW8(d);               \
  c += d; b ^= c; b = ROTW7(b);               \
  b = ROTV3(b); c = ROTV2(c); d = ROTV1(d);

#define QROUND_WORDS(a,b,c,d)           \
  a = a+b; d ^= a; d = d<<16 | d>>16;   \
  c = c+d; b ^= c; b = b<<12 | b>>20;   \
  a = a+b; d ^= a; d = d<< 8 | d>>24;   \
  c = c+d; b ^= c; b = b<< 7 | b>>25;

#define WRITE_XOR(in, op, d, v0, v1, v2, v3)              \
  STORE(op + d + 0, LOAD(in + d + 0) ^ REVV_BE(v0));      \
  STORE(op + d + 4, LOAD(in + d + 4) ^ REVV_BE(v1));      \
  STORE(op + d + 8, LOAD(in + d + 8) ^ REVV_BE(v2));      \
  STORE(op + d + 12, LOAD(in + d + 12) ^ REVV_BE(v3));

#if __ARM_NEON__
/* For ARM, we can't depend on NEON support, so this function is compiled with
 * a different name, along with the generic code, and can be enabled at
 * run-time. */
void CRYPTO_chacha_20_neon(
#else
void CRYPTO_chacha_20(
#endif
	uint8_t *out,
	const uint8_t *in,
	size_t inlen,
	const uint8_t key[32],
	const uint8_t nonce[12],
	uint32_t counter)
	{
	unsigned iters, i, *op=(unsigned *)out, *ip=(unsigned *)in, *kp;
#if defined(__ARM_NEON__)
	uint32_t np[3];
	uint8_t alignment_buffer[16] __attribute__((aligned(16)));
#endif
	vec s0, s1, s2, s3;
	__attribute__ ((aligned (16))) unsigned chacha_const[] =
		{0x61707865,0x3320646E,0x79622D32,0x6B206574};
	kp = (unsigned *)key;
#if defined(__ARM_NEON__)
	memcpy(np, nonce, 12);
#endif
	s0 = LOAD_ALIGNED(chacha_const);
	s1 = LOAD(&((vec*)kp)[0]);
	s2 = LOAD(&((vec*)kp)[1]);
	s3 = (vec){
		counter,
		((uint32_t*)nonce)[0],
		((uint32_t*)nonce)[1],
		((uint32_t*)nonce)[2]
	};

	for (iters = 0; iters < inlen/(BPI*64); iters++)
		{
#if GPR_TOO
		register unsigned x0, x1, x2, x3, x4, x5, x6, x7, x8,
				  x9, x10, x11, x12, x13, x14, x15;
#endif
#if VBPI > 2
		vec v8,v9,v10,v11;
#endif
#if VBPI > 3
		vec v12,v13,v14,v15;
#endif

		vec v0,v1,v2,v3,v4,v5,v6,v7;
		v4 = v0 = s0; v5 = v1 = s1; v6 = v2 = s2; v3 = s3;
		v7 = v3 + ONE;
#if VBPI > 2
		v8 = v4; v9 = v5; v10 = v6;
		v11 = v7 + ONE;
#endif
#if VBPI > 3
		v12 = v8; v13 = v9; v14 = v10;
		v15 = v11 + ONE;
#endif
#if GPR_TOO
		x0 = chacha_const[0]; x1 = chacha_const[1];
		x2 = chacha_const[2]; x3 = chacha_const[3];
		x4 = kp[0]; x5 = kp[1]; x6 = kp[2]; x7 = kp[3];
		x8 = kp[4]; x9 = kp[5]; x10 = kp[6]; x11 = kp[7];
		x12 = counter+BPI*iters+(BPI-1); x13 = np[0];
		x14 = np[1]; x15 = np[2];
#endif
		for (i = CHACHA_RNDS/2; i; i--)
			{
			DQROUND_VECTORS(v0,v1,v2,v3)
			DQROUND_VECTORS(v4,v5,v6,v7)
#if VBPI > 2
			DQROUND_VECTORS(v8,v9,v10,v11)
#endif
#if VBPI > 3
			DQROUND_VECTORS(v12,v13,v14,v15)
#endif
#if GPR_TOO
			QROUND_WORDS( x0, x4, x8,x12)
			QROUND_WORDS( x1, x5, x9,x13)
			QROUND_WORDS( x2, x6,x10,x14)
			QROUND_WORDS( x3, x7,x11,x15)
			QROUND_WORDS( x0, x5,x10,x15)
			QROUND_WORDS( x1, x6,x11,x12)
			QROUND_WORDS( x2, x7, x8,x13)
			QROUND_WORDS( x3, x4, x9,x14)
#endif
			}

		WRITE_XOR(ip, op, 0, v0+s0, v1+s1, v2+s2, v3+s3)
		s3 += ONE;
		WRITE_XOR(ip, op, 16, v4+s0, v5+s1, v6+s2, v7+s3)
		s3 += ONE;
#if VBPI > 2
		WRITE_XOR(ip, op, 32, v8+s0, v9+s1, v10+s2, v11+s3)
		s3 += ONE;
#endif
#if VBPI > 3
		WRITE_XOR(ip, op, 48, v12+s0, v13+s1, v14+s2, v15+s3)
		s3 += ONE;
#endif
		ip += VBPI*16;
		op += VBPI*16;
#if GPR_TOO
		op[0] = REVW_BE(REVW_BE(ip[0]) ^ (x0 + chacha_const[0]));
		op[1] = REVW_BE(REVW_BE(ip[1]) ^ (x1 + chacha_const[1]));
		op[2] = REVW_BE(REVW_BE(ip[2]) ^ (x2 + chacha_const[2]));
		op[3] = REVW_BE(REVW_BE(ip[3]) ^ (x3 + chacha_const[3]));
		op[4] = REVW_BE(REVW_BE(ip[4]) ^ (x4 + kp[0]));
		op[5] = REVW_BE(REVW_BE(ip[5]) ^ (x5 + kp[1]));
		op[6] = REVW_BE(REVW_BE(ip[6]) ^ (x6 + kp[2]));
		op[7] = REVW_BE(REVW_BE(ip[7]) ^ (x7 + kp[3]));
		op[8] = REVW_BE(REVW_BE(ip[8]) ^ (x8 + kp[4]));
		op[9] = REVW_BE(REVW_BE(ip[9]) ^ (x9 + kp[5]));
		op[10] = REVW_BE(REVW_BE(ip[10]) ^ (x10 + kp[6]));
		op[11] = REVW_BE(REVW_BE(ip[11]) ^ (x11 + kp[7]));
		op[12] = REVW_BE(REVW_BE(ip[12]) ^ (x12 + counter+BPI*iters+(BPI-1)));
		op[13] = REVW_BE(REVW_BE(ip[13]) ^ (x13 + np[0]));
		op[14] = REVW_BE(REVW_BE(ip[14]) ^ (x14 + np[1]));
		op[15] = REVW_BE(REVW_BE(ip[15]) ^ (x15 + np[2]));
		s3 += ONE;
		ip += 16;
		op += 16;
#endif
		}

	for (iters = inlen%(BPI*64)/64; iters != 0; iters--)
		{
		vec v0 = s0, v1 = s1, v2 = s2, v3 = s3;
		for (i = CHACHA_RNDS/2; i; i--)
			{
			DQROUND_VECTORS(v0,v1,v2,v3);
			}
		WRITE_XOR(ip, op, 0, v0+s0, v1+s1, v2+s2, v3+s3)
		s3 += ONE;
		ip += 16;
		op += 16;
		}

	inlen = inlen % 64;
	if (inlen)
		{
		__attribute__ ((aligned (16))) vec buf[4];
		vec v0,v1,v2,v3;
		v0 = s0; v1 = s1; v2 = s2; v3 = s3;
		for (i = CHACHA_RNDS/2; i; i--)
			{
			DQROUND_VECTORS(v0,v1,v2,v3);
			}

		if (inlen >= 16)
			{
			STORE(op + 0, LOAD(ip + 0) ^ REVV_BE(v0 + s0));
			if (inlen >= 32)
				{
				STORE(op + 4, LOAD(ip + 4) ^ REVV_BE(v1 + s1));
				if (inlen >= 48)
					{
					STORE(op + 8, LOAD(ip + 8) ^
						      REVV_BE(v2 + s2));
					buf[3] = REVV_BE(v3 + s3);
					}
				else
					buf[2] = REVV_BE(v2 + s2);
				}
			else
				buf[1] = REVV_BE(v1 + s1);
			}
		else
			buf[0] = REVV_BE(v0 + s0);

		for (i=inlen & ~15; i<inlen; i++)
			((char *)op)[i] = ((char *)ip)[i] ^ ((char *)buf)[i];
		}
	}

#endif /* ASM_GEN || !OPENSSL_WINDOWS && (OPENSSL_X86_64 || OPENSSL_X86) && SSE2 */
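
/* Usage sketch (illustrative only, excluded from compilation): one way a
 * caller might invoke the CRYPTO_chacha_20 entry point declared in
 * <openssl/chacha.h>. ChaCha20 is a stream cipher, so the same call performs
 * both encryption and decryption; |example_encrypt| is a hypothetical helper,
 * not part of this file's interface. */
#if 0
#include <stdint.h>

static void example_encrypt(uint8_t *out, const uint8_t *in, size_t in_len,
                            const uint8_t key[32], const uint8_t nonce[12])
	{
	/* Start the keystream at block zero; a caller that has already
	 * consumed keystream blocks would pass the next block index instead. */
	uint32_t counter = 0;
	CRYPTO_chacha_20(out, in, in_len, key, nonce, counter);
	}
#endif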