/* SHA-256 and SHA-512 implementation based on code by Olivier Gay
 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date:  04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "2sysincludes.h"
#include "2common.h"
#include "2sha.h"

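/*
 * Basic bitwise building blocks: logical shift right, rotates, and the
 * FIPS 180-2 "choice" (CH) and "majority" (MAJ) functions used by the
 * compression rounds below.
 */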
#define SHFR(x, n)    (x >> n)
#define ROTR(x, n)   ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define ROTL(x, n)   ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define CH(x, y, z)  ((x & y) ^ (~x & z))
#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))

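/*
 * FIPS 180-2 SHA-256 functions: F1/F2 are the "big sigma" functions applied
 * to the working variables in each round; F3/F4 are the "small sigma"
 * functions used to expand the message schedule.
 */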
#define SHA256_F1(x) (ROTR(x,  2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define SHA256_F2(x) (ROTR(x,  6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define SHA256_F3(x) (ROTR(x,  7) ^ ROTR(x, 18) ^ SHFR(x,  3))
#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10))

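/* Store a 32-bit word as big-endian bytes, and load it back again. */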
#define UNPACK32(x, str)				\
	{						\
		*((str) + 3) = (uint8_t) ((x)      );	\
		*((str) + 2) = (uint8_t) ((x) >>  8);	\
		*((str) + 1) = (uint8_t) ((x) >> 16);	\
		*((str) + 0) = (uint8_t) ((x) >> 24);	\
	}

#define PACK32(str, x)						\
	{							\
		*(x) =   ((uint32_t) *((str) + 3)      )	\
			| ((uint32_t) *((str) + 2) <<  8)       \
			| ((uint32_t) *((str) + 1) << 16)       \
			| ((uint32_t) *((str) + 0) << 24);      \
	}

/* Macros used for loop unrolling */

#define SHA256_SCR(i)						\
	{							\
		w[i] =  SHA256_F4(w[i -  2]) + w[i -  7]	\
			+ SHA256_F3(w[i - 15]) + w[i - 16];	\
	}

#define SHA256_EXP(a, b, c, d, e, f, g, h, j)				\
	{								\
		t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) \
			+ sha256_k[j] + w[j];				\
		t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);       \
		wv[d] += t1;                                            \
		wv[h] = t1 + t2;                                        \
	}

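/* SHA-256 initial hash value (FIPS 180-2). */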
static const uint32_t sha256_h0[8] = {
	0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
	0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};

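/* SHA-256 round constants K (FIPS 180-2). */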
static const uint32_t sha256_k[64] = {
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};

/* SHA-256 implementation */
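/*
 * Illustrative usage sketch (not part of this file): the streaming API below
 * is driven init -> update -> finalize.  This sketch assumes a 32-byte digest
 * buffer; the VB2_SHA256_DIGEST_SIZE constant and the caller-supplied
 * buf/buf_size names are used here for illustration only.
 *
 *	struct vb2_sha256_context ctx;
 *	uint8_t digest[VB2_SHA256_DIGEST_SIZE];
 *
 *	vb2_sha256_init(&ctx);
 *	vb2_sha256_update(&ctx, buf, buf_size);
 *	vb2_sha256_finalize(&ctx, digest);
 */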
void vb2_sha256_init(struct vb2_sha256_context *ctx)
{
#ifndef UNROLL_LOOPS
	int i;
	for (i = 0; i < 8; i++) {
		ctx->h[i] = sha256_h0[i];
	}
#else
	ctx->h[0] = sha256_h0[0]; ctx->h[1] = sha256_h0[1];
	ctx->h[2] = sha256_h0[2]; ctx->h[3] = sha256_h0[3];
	ctx->h[4] = sha256_h0[4]; ctx->h[5] = sha256_h0[5];
	ctx->h[6] = sha256_h0[6]; ctx->h[7] = sha256_h0[7];
#endif /* !UNROLL_LOOPS */

	ctx->size = 0;
	ctx->total_size = 0;
}

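/*
 * Process block_nb consecutive 64-byte blocks of message, updating the
 * intermediate hash state in ctx->h.
 */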
static void vb2_sha256_transform(struct vb2_sha256_context *ctx,
				 const uint8_t *message,
				 unsigned int block_nb)
{
	/* Note that these arrays use 72*4=288 bytes of stack */
	uint32_t w[64];
	uint32_t wv[8];
	uint32_t t1, t2;
	const unsigned char *sub_block;
	int i;

#ifndef UNROLL_LOOPS
	int j;
#endif

	for (i = 0; i < (int) block_nb; i++) {
		sub_block = message + (i << 6);

#ifndef UNROLL_LOOPS
		for (j = 0; j < 16; j++) {
			PACK32(&sub_block[j << 2], &w[j]);
		}

		for (j = 16; j < 64; j++) {
			SHA256_SCR(j);
		}

		for (j = 0; j < 8; j++) {
			wv[j] = ctx->h[j];
		}

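		/* 64 compression rounds; wv[0..7] hold working variables a..h */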
		for (j = 0; j < 64; j++) {
			t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6])
				+ sha256_k[j] + w[j];
			t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
			wv[7] = wv[6];
			wv[6] = wv[5];
			wv[5] = wv[4];
			wv[4] = wv[3] + t1;
			wv[3] = wv[2];
			wv[2] = wv[1];
			wv[1] = wv[0];
			wv[0] = t1 + t2;
		}

		for (j = 0; j < 8; j++) {
			ctx->h[j] += wv[j];
		}
#else
		PACK32(&sub_block[ 0], &w[ 0]); PACK32(&sub_block[ 4], &w[ 1]);
		PACK32(&sub_block[ 8], &w[ 2]); PACK32(&sub_block[12], &w[ 3]);
		PACK32(&sub_block[16], &w[ 4]); PACK32(&sub_block[20], &w[ 5]);
		PACK32(&sub_block[24], &w[ 6]); PACK32(&sub_block[28], &w[ 7]);
		PACK32(&sub_block[32], &w[ 8]); PACK32(&sub_block[36], &w[ 9]);
		PACK32(&sub_block[40], &w[10]); PACK32(&sub_block[44], &w[11]);
		PACK32(&sub_block[48], &w[12]); PACK32(&sub_block[52], &w[13]);
		PACK32(&sub_block[56], &w[14]); PACK32(&sub_block[60], &w[15]);

		SHA256_SCR(16); SHA256_SCR(17); SHA256_SCR(18); SHA256_SCR(19);
		SHA256_SCR(20); SHA256_SCR(21); SHA256_SCR(22); SHA256_SCR(23);
		SHA256_SCR(24); SHA256_SCR(25); SHA256_SCR(26); SHA256_SCR(27);
		SHA256_SCR(28); SHA256_SCR(29); SHA256_SCR(30); SHA256_SCR(31);
		SHA256_SCR(32); SHA256_SCR(33); SHA256_SCR(34); SHA256_SCR(35);
		SHA256_SCR(36); SHA256_SCR(37); SHA256_SCR(38); SHA256_SCR(39);
		SHA256_SCR(40); SHA256_SCR(41); SHA256_SCR(42); SHA256_SCR(43);
		SHA256_SCR(44); SHA256_SCR(45); SHA256_SCR(46); SHA256_SCR(47);
		SHA256_SCR(48); SHA256_SCR(49); SHA256_SCR(50); SHA256_SCR(51);
		SHA256_SCR(52); SHA256_SCR(53); SHA256_SCR(54); SHA256_SCR(55);
		SHA256_SCR(56); SHA256_SCR(57); SHA256_SCR(58); SHA256_SCR(59);
		SHA256_SCR(60); SHA256_SCR(61); SHA256_SCR(62); SHA256_SCR(63);

		wv[0] = ctx->h[0]; wv[1] = ctx->h[1];
		wv[2] = ctx->h[2]; wv[3] = ctx->h[3];
		wv[4] = ctx->h[4]; wv[5] = ctx->h[5];
		wv[6] = ctx->h[6]; wv[7] = ctx->h[7];

		SHA256_EXP(0,1,2,3,4,5,6,7, 0); SHA256_EXP(7,0,1,2,3,4,5,6, 1);
		SHA256_EXP(6,7,0,1,2,3,4,5, 2); SHA256_EXP(5,6,7,0,1,2,3,4, 3);
		SHA256_EXP(4,5,6,7,0,1,2,3, 4); SHA256_EXP(3,4,5,6,7,0,1,2, 5);
		SHA256_EXP(2,3,4,5,6,7,0,1, 6); SHA256_EXP(1,2,3,4,5,6,7,0, 7);
		SHA256_EXP(0,1,2,3,4,5,6,7, 8); SHA256_EXP(7,0,1,2,3,4,5,6, 9);
		SHA256_EXP(6,7,0,1,2,3,4,5,10); SHA256_EXP(5,6,7,0,1,2,3,4,11);
		SHA256_EXP(4,5,6,7,0,1,2,3,12); SHA256_EXP(3,4,5,6,7,0,1,2,13);
		SHA256_EXP(2,3,4,5,6,7,0,1,14); SHA256_EXP(1,2,3,4,5,6,7,0,15);
		SHA256_EXP(0,1,2,3,4,5,6,7,16); SHA256_EXP(7,0,1,2,3,4,5,6,17);
		SHA256_EXP(6,7,0,1,2,3,4,5,18); SHA256_EXP(5,6,7,0,1,2,3,4,19);
		SHA256_EXP(4,5,6,7,0,1,2,3,20); SHA256_EXP(3,4,5,6,7,0,1,2,21);
		SHA256_EXP(2,3,4,5,6,7,0,1,22); SHA256_EXP(1,2,3,4,5,6,7,0,23);
		SHA256_EXP(0,1,2,3,4,5,6,7,24); SHA256_EXP(7,0,1,2,3,4,5,6,25);
		SHA256_EXP(6,7,0,1,2,3,4,5,26); SHA256_EXP(5,6,7,0,1,2,3,4,27);
		SHA256_EXP(4,5,6,7,0,1,2,3,28); SHA256_EXP(3,4,5,6,7,0,1,2,29);
		SHA256_EXP(2,3,4,5,6,7,0,1,30); SHA256_EXP(1,2,3,4,5,6,7,0,31);
		SHA256_EXP(0,1,2,3,4,5,6,7,32); SHA256_EXP(7,0,1,2,3,4,5,6,33);
		SHA256_EXP(6,7,0,1,2,3,4,5,34); SHA256_EXP(5,6,7,0,1,2,3,4,35);
		SHA256_EXP(4,5,6,7,0,1,2,3,36); SHA256_EXP(3,4,5,6,7,0,1,2,37);
		SHA256_EXP(2,3,4,5,6,7,0,1,38); SHA256_EXP(1,2,3,4,5,6,7,0,39);
		SHA256_EXP(0,1,2,3,4,5,6,7,40); SHA256_EXP(7,0,1,2,3,4,5,6,41);
		SHA256_EXP(6,7,0,1,2,3,4,5,42); SHA256_EXP(5,6,7,0,1,2,3,4,43);
		SHA256_EXP(4,5,6,7,0,1,2,3,44); SHA256_EXP(3,4,5,6,7,0,1,2,45);
		SHA256_EXP(2,3,4,5,6,7,0,1,46); SHA256_EXP(1,2,3,4,5,6,7,0,47);
		SHA256_EXP(0,1,2,3,4,5,6,7,48); SHA256_EXP(7,0,1,2,3,4,5,6,49);
		SHA256_EXP(6,7,0,1,2,3,4,5,50); SHA256_EXP(5,6,7,0,1,2,3,4,51);
		SHA256_EXP(4,5,6,7,0,1,2,3,52); SHA256_EXP(3,4,5,6,7,0,1,2,53);
		SHA256_EXP(2,3,4,5,6,7,0,1,54); SHA256_EXP(1,2,3,4,5,6,7,0,55);
		SHA256_EXP(0,1,2,3,4,5,6,7,56); SHA256_EXP(7,0,1,2,3,4,5,6,57);
		SHA256_EXP(6,7,0,1,2,3,4,5,58); SHA256_EXP(5,6,7,0,1,2,3,4,59);
		SHA256_EXP(4,5,6,7,0,1,2,3,60); SHA256_EXP(3,4,5,6,7,0,1,2,61);
		SHA256_EXP(2,3,4,5,6,7,0,1,62); SHA256_EXP(1,2,3,4,5,6,7,0,63);

		ctx->h[0] += wv[0]; ctx->h[1] += wv[1];
		ctx->h[2] += wv[2]; ctx->h[3] += wv[3];
		ctx->h[4] += wv[4]; ctx->h[5] += wv[5];
		ctx->h[6] += wv[6]; ctx->h[7] += wv[7];
#endif /* !UNROLL_LOOPS */
	}
}

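/*
 * Absorb "size" bytes of "data": top up any partial block buffered from a
 * previous call, transform every complete 64-byte block, and keep the
 * remainder in ctx->block for the next update or finalize.
 */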
void vb2_sha256_update(struct vb2_sha256_context *ctx,
		       const uint8_t *data,
		       uint32_t size)
{
	unsigned int block_nb;
	unsigned int new_size, rem_size, tmp_size;
	const uint8_t *shifted_data;

	tmp_size = VB2_SHA256_BLOCK_SIZE - ctx->size;
	rem_size = size < tmp_size ? size : tmp_size;

	memcpy(&ctx->block[ctx->size], data, rem_size);

	if (ctx->size + size < VB2_SHA256_BLOCK_SIZE) {
		ctx->size += size;
		return;
	}

	new_size = size - rem_size;
	block_nb = new_size / VB2_SHA256_BLOCK_SIZE;

	shifted_data = data + rem_size;

	vb2_sha256_transform(ctx, ctx->block, 1);
	vb2_sha256_transform(ctx, shifted_data, block_nb);

	rem_size = new_size % VB2_SHA256_BLOCK_SIZE;

	memcpy(ctx->block, &shifted_data[block_nb << 6],
	       rem_size);

	ctx->size = rem_size;
	ctx->total_size += (block_nb + 1) << 6;
}

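/*
 * Pad the buffered tail per FIPS 180-2 (0x80 terminator followed by the
 * message length in bits), transform the final block(s), and write the
 * 32-byte digest out big-endian.
 */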
void vb2_sha256_finalize(struct vb2_sha256_context *ctx, uint8_t *digest)
{
	unsigned int block_nb;
	unsigned int pm_size;
	unsigned int size_b;
#ifndef UNROLL_LOOPS
	int i;
#endif

	block_nb = (1 + ((VB2_SHA256_BLOCK_SIZE - 9)
			 < (ctx->size % VB2_SHA256_BLOCK_SIZE)));

	size_b = (ctx->total_size + ctx->size) << 3;
	pm_size = block_nb << 6;

	memset(ctx->block + ctx->size, 0, pm_size - ctx->size);
	ctx->block[ctx->size] = 0x80;
	UNPACK32(size_b, ctx->block + pm_size - 4);

	vb2_sha256_transform(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
	for (i = 0 ; i < 8; i++) {
		UNPACK32(ctx->h[i], &digest[i << 2]);
	}
#else
	UNPACK32(ctx->h[0], &digest[ 0]);
	UNPACK32(ctx->h[1], &digest[ 4]);
	UNPACK32(ctx->h[2], &digest[ 8]);
	UNPACK32(ctx->h[3], &digest[12]);
	UNPACK32(ctx->h[4], &digest[16]);
	UNPACK32(ctx->h[5], &digest[20]);
	UNPACK32(ctx->h[6], &digest[24]);
	UNPACK32(ctx->h[7], &digest[28]);
#endif /* !UNROLL_LOOPS */
}
    317