/* ====================================================================
 * Copyright (c) 2012 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com). */

#include <assert.h>
#include <string.h>

#include <openssl/digest.h>
#include <openssl/obj.h>
#include <openssl/sha.h>

#include "../internal.h"


/* TODO(davidben): unsigned should be size_t. The various constant_time
 * functions need to be switched to size_t. */

/* MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length
 * field. (SHA-384/512 have a 128-bit length field.) */
#define MAX_HASH_BIT_COUNT_BYTES 16

/* MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support.
 * Currently SHA-384/512 have a 128-byte block size, which is the largest
 * supported by TLS. */
#define MAX_HASH_BLOCK_SIZE 128

int EVP_tls_cbc_remove_padding(unsigned *out_len,
                               const uint8_t *in, unsigned in_len,
                               unsigned block_size, unsigned mac_size) {
  unsigned padding_length, good, to_check, i;
  const unsigned overhead = 1 /* padding length byte */ + mac_size;

  /* These lengths are all public so we can test them in non-constant time. */
  if (overhead > in_len) {
    return 0;
  }

  padding_length = in[in_len - 1];

  good = constant_time_ge(in_len, overhead + padding_length);
  /* The padding consists of a length byte at the end of the record and
   * then that many bytes of padding, all with the same value as the
   * length byte. Thus, with the length byte included, there are
   * |padding_length| + 1 bytes of padding.
   *
   * We can't check just |padding_length| + 1 bytes because that leaks
   * decrypted information. Therefore we always have to check the maximum
   * amount of padding possible. (Again, the length of the record is
   * public information so we can use it.) */
  to_check = 256; /* maximum amount of padding, including the length byte. */
  if (to_check > in_len) {
    to_check = in_len;
  }

  for (i = 0; i < to_check; i++) {
    uint8_t mask = constant_time_ge_8(padding_length, i);
    uint8_t b = in[in_len - 1 - i];
    /* The final |padding_length| + 1 bytes should all have the value
     * |padding_length|. Therefore the XOR should be zero. */
    good &= ~(mask & (padding_length ^ b));
  }

  /* If any of the final |padding_length| + 1 bytes had the wrong value,
   * one or more of the lower eight bits of |good| will be cleared. */
  good = constant_time_eq(0xff, good & 0xff);

  /* Always treat |padding_length| as zero on error. If, with a block size of
   * 16, a padding of [<15 arbitrary bytes> 15] were treated as 16 bytes of
   * padding and -1 returned, then good MAC with bad padding would be
   * distinguishable from bad MAC with bad padding, which is POODLE's padding
   * oracle. */
  padding_length = good & (padding_length + 1);
  *out_len = in_len - padding_length;

  return constant_time_select_int(good, 1, -1);
}
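
/* An illustrative example (not part of the original source): with
 * |mac_size| = 20, a well-formed record might end in
 *
 *   ... <20-byte MAC> 03 03 03 03
 *
 * Here |padding_length| is 3, so the final four bytes (three padding bytes
 * plus the length byte itself) must all be 0x03. The loop above always scans
 * up to 256 trailing bytes, but |mask| limits which comparisons can clear
 * |good|, so a record ending in ... 07 03 03 03 fails the check in exactly
 * the same amount of time. */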

/* If CBC_MAC_ROTATE_IN_PLACE is defined then EVP_tls_cbc_copy_mac is performed
 * with variable accesses in a 64-byte-aligned buffer. Assuming that this fits
 * into a single or pair of cache-lines, the variable memory accesses don't
 * actually affect the timing. CPUs with smaller cache-lines [if any] are not
 * multi-core and so are not considered vulnerable to cache-timing attacks. */
#define CBC_MAC_ROTATE_IN_PLACE

void EVP_tls_cbc_copy_mac(uint8_t *out, unsigned md_size,
                          const uint8_t *in, unsigned in_len,
                          unsigned orig_len) {
#if defined(CBC_MAC_ROTATE_IN_PLACE)
  uint8_t rotated_mac_buf[64 + EVP_MAX_MD_SIZE];
  uint8_t *rotated_mac;
#else
  uint8_t rotated_mac[EVP_MAX_MD_SIZE];
#endif

  /* mac_end is the index of |in| just after the end of the MAC. */
  unsigned mac_end = in_len;
  unsigned mac_start = mac_end - md_size;
  /* scan_start contains the number of bytes that we can ignore because
   * the MAC's position can only vary by 255 bytes. */
  unsigned scan_start = 0;
  unsigned i, j;
  unsigned div_spoiler;
  unsigned rotate_offset;

  assert(orig_len >= in_len);
  assert(in_len >= md_size);
  assert(md_size <= EVP_MAX_MD_SIZE);

#if defined(CBC_MAC_ROTATE_IN_PLACE)
  rotated_mac = rotated_mac_buf + ((0 - (size_t)rotated_mac_buf) & 63);
#endif

  /* This information is public so it's safe to branch based on it. */
  if (orig_len > md_size + 255 + 1) {
    scan_start = orig_len - (md_size + 255 + 1);
  }
  /* div_spoiler contains a multiple of md_size that is used to cause the
   * modulo operation to be constant time. Without this, the time varies
   * based on the amount of padding when running on Intel chips at least.
   *
   * The aim of right-shifting md_size is so that the compiler doesn't
   * figure out that it can remove div_spoiler as that would require it
   * to prove that md_size is always even, which I hope is beyond it. */
  div_spoiler = md_size >> 1;
  div_spoiler <<= (sizeof(div_spoiler) - 1) * 8;
  rotate_offset = (div_spoiler + mac_start - scan_start) % md_size;
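  /* Worked example of the spoiler (assuming a 32-bit unsigned): for SHA-1,
   * |md_size| = 20, so div_spoiler = (20 >> 1) << 24 = 20 * 2^23 = 0x0a000000,
   * which is a multiple of 20. Adding it before the "%" therefore leaves the
   * result unchanged, but it forces the division to always see a large
   * dividend, whatever the value of |mac_start - scan_start|. */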

  memset(rotated_mac, 0, md_size);
  for (i = scan_start, j = 0; i < orig_len; i++) {
    uint8_t mac_started = constant_time_ge_8(i, mac_start);
    uint8_t mac_ended = constant_time_ge_8(i, mac_end);
    uint8_t b = in[i];
    rotated_mac[j++] |= b & mac_started & ~mac_ended;
    j &= constant_time_lt(j, md_size);
  }

/* Now rotate the MAC. */
#if defined(CBC_MAC_ROTATE_IN_PLACE)
  j = 0;
  for (i = 0; i < md_size; i++) {
    /* In case the cache-line is 32 bytes, touch the second line. */
    ((volatile uint8_t *)rotated_mac)[rotate_offset ^ 32];
    out[j++] = rotated_mac[rotate_offset++];
    rotate_offset &= constant_time_lt(rotate_offset, md_size);
  }
#else
  memset(out, 0, md_size);
  rotate_offset = md_size - rotate_offset;
  rotate_offset &= constant_time_lt(rotate_offset, md_size);
  for (i = 0; i < md_size; i++) {
    for (j = 0; j < md_size; j++) {
      out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset);
    }
    rotate_offset++;
    rotate_offset &= constant_time_lt(rotate_offset, md_size);
  }
#endif
}
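
/* A sketch of how these helpers are meant to be combined in a CBC record
 * path; the caller shape below is illustrative, not taken from this file:
 *
 *   unsigned len;  // data + MAC length, with padding removed
 *   int padding_ok = EVP_tls_cbc_remove_padding(&len, rec, rec_len,
 *                                               block_size, md_size);
 *   uint8_t mac[EVP_MAX_MD_SIZE];
 *   EVP_tls_cbc_copy_mac(mac, md_size, rec, len, rec_len);
 *   // Recompute the expected MAC with EVP_tls_cbc_digest_record and compare
 *   // it against |mac| in constant time, folding |padding_ok| into the
 *   // result so that bad padding and a bad MAC are indistinguishable.
 */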

/* u32toBE serialises an unsigned, 32-bit number (n) as four bytes at (p) in
 * big-endian order. The value of p is advanced by four. */
#define u32toBE(n, p) \
  (*((p)++)=(uint8_t)(n>>24), \
   *((p)++)=(uint8_t)(n>>16), \
   *((p)++)=(uint8_t)(n>>8), \
   *((p)++)=(uint8_t)(n))

/* u64toBE serialises an unsigned, 64-bit number (n) as eight bytes at (p) in
 * big-endian order. The value of p is advanced by eight. */
#define u64toBE(n, p) \
  (*((p)++)=(uint8_t)(n>>56), \
   *((p)++)=(uint8_t)(n>>48), \
   *((p)++)=(uint8_t)(n>>40), \
   *((p)++)=(uint8_t)(n>>32), \
   *((p)++)=(uint8_t)(n>>24), \
   *((p)++)=(uint8_t)(n>>16), \
   *((p)++)=(uint8_t)(n>>8), \
   *((p)++)=(uint8_t)(n))
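
/* For example, with uint8_t buf[4], uint8_t *p = buf and uint32_t
 * n = 0x01020304, u32toBE(n, p) writes the bytes 01 02 03 04 into |buf| and
 * leaves |p| pointing at buf + 4. */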

/* These functions serialize the state of a hash and thus perform the standard
 * "final" operation without adding the padding and length that such a function
 * typically does. */
static void tls1_sha1_final_raw(void *ctx, uint8_t *md_out) {
  SHA_CTX *sha1 = ctx;
  u32toBE(sha1->h[0], md_out);
  u32toBE(sha1->h[1], md_out);
  u32toBE(sha1->h[2], md_out);
  u32toBE(sha1->h[3], md_out);
  u32toBE(sha1->h[4], md_out);
}
#define LARGEST_DIGEST_CTX SHA_CTX

static void tls1_sha256_final_raw(void *ctx, uint8_t *md_out) {
  SHA256_CTX *sha256 = ctx;
  unsigned i;

  for (i = 0; i < 8; i++) {
    u32toBE(sha256->h[i], md_out);
  }
}
#undef  LARGEST_DIGEST_CTX
#define LARGEST_DIGEST_CTX SHA256_CTX

static void tls1_sha512_final_raw(void *ctx, uint8_t *md_out) {
  SHA512_CTX *sha512 = ctx;
  unsigned i;

  for (i = 0; i < 8; i++) {
    u64toBE(sha512->h[i], md_out);
  }
}
#undef  LARGEST_DIGEST_CTX
#define LARGEST_DIGEST_CTX SHA512_CTX
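
/* Together with the corresponding *_Transform functions, these give
 * block-at-a-time hashing with direct access to the chaining value. A sketch
 * for SHA-1, where |block| is exactly 64 bytes:
 *
 *   SHA_CTX ctx;
 *   SHA1_Init(&ctx);
 *   SHA1_Transform(&ctx, block);
 *   tls1_sha1_final_raw(&ctx, out);  // 20 bytes; no padding/length appended
 */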

int EVP_tls_cbc_record_digest_supported(const EVP_MD *md) {
  switch (EVP_MD_type(md)) {
    case NID_sha1:
    case NID_sha256:
    case NID_sha384:
      return 1;

    default:
      return 0;
  }
}
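
/* Callers are expected to gate on this before calling
 * EVP_tls_cbc_digest_record, along the lines of:
 *
 *   if (!EVP_tls_cbc_record_digest_supported(md)) {
 *     return 0;  // or fall back to a non-constant-time path
 *   }
 */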

int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
                              size_t *md_out_size, const uint8_t header[13],
                              const uint8_t *data, size_t data_plus_mac_size,
                              size_t data_plus_mac_plus_padding_size,
                              const uint8_t *mac_secret,
                              unsigned mac_secret_length) {
  union {
    double align;
    uint8_t c[sizeof(LARGEST_DIGEST_CTX)];
  } md_state;
  void (*md_final_raw)(void *ctx, uint8_t *md_out);
  void (*md_transform)(void *ctx, const uint8_t *block);
  unsigned md_size, md_block_size = 64;
  unsigned len, max_mac_bytes, num_blocks, num_starting_blocks, k,
           mac_end_offset, c, index_a, index_b;
  unsigned int bits; /* at most 18 bits */
  uint8_t length_bytes[MAX_HASH_BIT_COUNT_BYTES];
  /* hmac_pad is the masked HMAC key. */
  uint8_t hmac_pad[MAX_HASH_BLOCK_SIZE];
  uint8_t first_block[MAX_HASH_BLOCK_SIZE];
  uint8_t mac_out[EVP_MAX_MD_SIZE];
  unsigned i, j, md_out_size_u;
  EVP_MD_CTX md_ctx;
  /* md_length_size is the number of bytes in the length field that terminates
   * the hash. */
  unsigned md_length_size = 8;

  /* This is a, hopefully redundant, check that allows us to forget about
   * many possible overflows later in this function. */
  assert(data_plus_mac_plus_padding_size < 1024 * 1024);

  switch (EVP_MD_type(md)) {
    case NID_sha1:
      SHA1_Init((SHA_CTX *)md_state.c);
      md_final_raw = tls1_sha1_final_raw;
      md_transform =
          (void (*)(void *ctx, const uint8_t *block))SHA1_Transform;
      md_size = 20;
      break;

    case NID_sha256:
      SHA256_Init((SHA256_CTX *)md_state.c);
      md_final_raw = tls1_sha256_final_raw;
      md_transform =
          (void (*)(void *ctx, const uint8_t *block))SHA256_Transform;
      md_size = 32;
      break;

    case NID_sha384:
      SHA384_Init((SHA512_CTX *)md_state.c);
      md_final_raw = tls1_sha512_final_raw;
      md_transform =
          (void (*)(void *ctx, const uint8_t *block))SHA512_Transform;
      md_size = 384 / 8;
      md_block_size = 128;
      md_length_size = 16;
      break;

    default:
      /* EVP_tls_cbc_record_digest_supported should have been called first to
       * check that the hash function is supported. */
      assert(0);
      *md_out_size = 0;
      return 0;
  }

  assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES);
  assert(md_block_size <= MAX_HASH_BLOCK_SIZE);
  assert(md_size <= EVP_MAX_MD_SIZE);

  static const unsigned kHeaderLength = 13;

  /* kVarianceBlocks is the number of blocks of the hash that we have to
   * calculate in constant time because they could be altered by the
   * padding value.
   *
   * TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
   * required to be minimal. Therefore we say that the final six blocks
   * can vary based on the padding. */
  static const unsigned kVarianceBlocks = 6;
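  /* As a rough sanity check on that constant (an illustration, not a proof):
   * with a 64-byte block hash, the padding can move the MAC's end by up to
   * 256 bytes, and the 0x80 terminator plus an 8-byte length field follow
   * it, so at most (256 + 1 + 8 + 63) / 64 + 1 = 6 blocks can be affected.
   * The 128-byte block hashes span even fewer blocks. */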

  /* From now on we're dealing with the MAC, which conceptually has 13
   * bytes of `header' before the start of the data. */
  len = data_plus_mac_plus_padding_size + kHeaderLength;
  /* max_mac_bytes contains the maximum number of bytes in the MAC, including
   * |header|, assuming that there's no padding. */
  max_mac_bytes = len - md_size - 1;
  /* num_blocks is the maximum number of hash blocks. */
  num_blocks =
      (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size;
  /* In order to calculate the MAC in constant time we have to handle
   * the final blocks specially because the padding value could cause the
   * end to appear somewhere in the final |kVarianceBlocks| blocks and we
   * can't leak where. However, |num_starting_blocks| worth of data can
   * be hashed right away because no padding value can affect whether
   * they are plaintext. */
  num_starting_blocks = 0;
  /* k is the starting byte offset into the conceptual header||data where
   * we start processing. */
  k = 0;
  /* mac_end_offset is the index just past the end of the data to be
   * MACed. */
  mac_end_offset = data_plus_mac_size + kHeaderLength - md_size;
  /* c is the index of the 0x80 byte in the final hash block that
   * contains application data. */
  c = mac_end_offset % md_block_size;
  /* index_a is the hash block number that contains the 0x80 terminating
   * value. */
  index_a = mac_end_offset / md_block_size;
  /* index_b is the hash block number that contains the encoded bit length
   * of the message. */
  index_b = (mac_end_offset + md_length_size) / md_block_size;

  if (num_blocks > kVarianceBlocks) {
    num_starting_blocks = num_blocks - kVarianceBlocks;
    k = md_block_size * num_starting_blocks;
  }

  /* bits is the hash length in bits. It includes the additional hash
   * block for the masked HMAC key. */
  bits = 8 * mac_end_offset;
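  /* Worked example (SHA-1: md_size = 20, md_block_size = 64,
   * md_length_size = 8): with data_plus_mac_size = 56,
   * mac_end_offset = 56 + 13 - 20 = 49, c = 49 % 64 = 49,
   * index_a = 49 / 64 = 0 and index_b = (49 + 8) / 64 = 0, so the 0x80 byte
   * and the length both land in the first block after the hmac_pad block.
   * bits becomes 8 * 49 + 8 * 64 = 904 once the ipad block is counted
   * below. */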

  /* Compute the initial HMAC block. */
  bits += 8 * md_block_size;
  memset(hmac_pad, 0, md_block_size);
  assert(mac_secret_length <= sizeof(hmac_pad));
  memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (i = 0; i < md_block_size; i++) {
    hmac_pad[i] ^= 0x36;
  }

  md_transform(md_state.c, hmac_pad);

  memset(length_bytes, 0, md_length_size - 4);
  length_bytes[md_length_size - 4] = (uint8_t)(bits >> 24);
  length_bytes[md_length_size - 3] = (uint8_t)(bits >> 16);
  length_bytes[md_length_size - 2] = (uint8_t)(bits >> 8);
  length_bytes[md_length_size - 1] = (uint8_t)bits;

  if (k > 0) {
    /* k is a multiple of md_block_size. */
    memcpy(first_block, header, 13);
    memcpy(first_block + 13, data, md_block_size - 13);
    md_transform(md_state.c, first_block);
    for (i = 1; i < k / md_block_size; i++) {
      md_transform(md_state.c, data + md_block_size * i - 13);
    }
  }

  memset(mac_out, 0, sizeof(mac_out));

  /* We now process the final hash blocks. For each block, we construct
   * it in constant time. If |i == index_a| then we'll include the 0x80
   * byte and zero pad etc. For each block we selectively copy it, in
   * constant time, to |mac_out|. */
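  /* For instance, in the common case where i == index_a == index_b, the
   * constructed block is
   *
   *   <remaining data bytes> 0x80 0x00 ... 0x00 <big-endian bit length>
   *
   * which is exactly the final block that standard Merkle-Damgard padding
   * would produce for a message ending at |mac_end_offset|. */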
  for (i = num_starting_blocks; i <= num_starting_blocks + kVarianceBlocks;
       i++) {
    uint8_t block[MAX_HASH_BLOCK_SIZE];
    uint8_t is_block_a = constant_time_eq_8(i, index_a);
    uint8_t is_block_b = constant_time_eq_8(i, index_b);
    for (j = 0; j < md_block_size; j++) {
      uint8_t b = 0, is_past_c, is_past_cp1;
      if (k < kHeaderLength) {
        b = header[k];
      } else if (k < data_plus_mac_plus_padding_size + kHeaderLength) {
        b = data[k - kHeaderLength];
      }
      k++;

      is_past_c = is_block_a & constant_time_ge_8(j, c);
      is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1);
      /* If this is the block containing the end of the
       * application data, and we are at the offset for the
       * 0x80 value, then overwrite b with 0x80. */
      b = constant_time_select_8(is_past_c, 0x80, b);
      /* If this is the block containing the end of the
       * application data and we're past the 0x80 value then
       * just write zero. */
      b = b & ~is_past_cp1;
      /* If this is index_b (the final block), but not
       * index_a (the end of the data), then the length
       * didn't fit into index_a and we're having to
       * add an extra block of zeros. */
      b &= ~is_block_b | is_block_a;

      /* The final bytes of one of the blocks contain the
       * length. */
      if (j >= md_block_size - md_length_size) {
        /* If this is index_b, write a length byte. */
        b = constant_time_select_8(
            is_block_b, length_bytes[j - (md_block_size - md_length_size)], b);
      }
      block[j] = b;
    }

    md_transform(md_state.c, block);
    md_final_raw(md_state.c, block);
    /* If this is index_b, copy the hash value to |mac_out|. */
    for (j = 0; j < md_size; j++) {
      mac_out[j] |= block[j] & is_block_b;
    }
  }

  EVP_MD_CTX_init(&md_ctx);
  if (!EVP_DigestInit_ex(&md_ctx, md, NULL /* engine */)) {
    EVP_MD_CTX_cleanup(&md_ctx);
    return 0;
  }

  /* Complete the HMAC in the standard manner. */
  for (i = 0; i < md_block_size; i++) {
    hmac_pad[i] ^= 0x6a;
  }
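  /* hmac_pad previously held |key ^ 0x36| (the HMAC ipad mask), and
   * 0x36 ^ 0x5c = 0x6a, so the XOR above turns it into |key ^ 0x5c|, the
   * opad value: HMAC(k, m) = H((k ^ opad) || H((k ^ ipad) || m)). */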

  EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size);
  EVP_DigestUpdate(&md_ctx, mac_out, md_size);
  EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u);
  *md_out_size = md_out_size_u;
  EVP_MD_CTX_cleanup(&md_ctx);

  return 1;
}