/* ssl/s3_cbc.c */
/* ====================================================================
 * Copyright (c) 2012 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

#include "../crypto/constant_time_locl.h"
#include "ssl_locl.h"

#include <openssl/md5.h>
#include <openssl/sha.h>

/* MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length
 * field. (SHA-384/512 have 128-bit length.) */
#define MAX_HASH_BIT_COUNT_BYTES 16

/* MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support.
 * Currently SHA-384/512 has a 128-byte block size and that's the largest
 * supported by TLS. */
#define MAX_HASH_BLOCK_SIZE 128
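/* The constant_time_* helpers from constant_time_locl.h used throughout this
 * file operate on masks rather than booleans: as used below,
 * constant_time_ge(a,b), constant_time_eq(a,b) and constant_time_lt(a,b)
 * evaluate to an all-ones value when the relation holds and to zero
 * otherwise, without branching on secret data. The *_8 variants yield the
 * same mask truncated to a single byte, and constant_time_select_int(mask,
 * a, b) / constant_time_select_8(mask, a, b) pick |a| when the mask is all
 * ones and |b| when it is zero. */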
/* ssl3_cbc_remove_padding removes padding from the decrypted, SSLv3, CBC
 * record in |rec| by updating |rec->length| in constant time.
 *
 * block_size: the block size of the cipher used to encrypt the record.
 * returns:
 *   0: (in non-constant time) if the record is publicly invalid.
 *   1: if the padding was valid
 *  -1: otherwise. */
int ssl3_cbc_remove_padding(const SSL* s,
                            SSL3_RECORD *rec,
                            unsigned block_size,
                            unsigned mac_size)
    {
    unsigned padding_length, good;
    const unsigned overhead = 1 /* padding length byte */ + mac_size;

    /* These lengths are all public so we can test them in non-constant
     * time. */
    if (overhead > rec->length)
        return 0;

    padding_length = rec->data[rec->length-1];
    good = constant_time_ge(rec->length, padding_length+overhead);
    /* SSLv3 requires that the padding is minimal. */
    good &= constant_time_ge(block_size, padding_length+1);
    padding_length = good & (padding_length+1);
    rec->length -= padding_length;
    rec->type |= padding_length<<8;    /* kludge: pass padding length */
    return constant_time_select_int(good, 1, -1);
    }

/* tls1_cbc_remove_padding removes the CBC padding from the decrypted, TLS, CBC
 * record in |rec| in constant time and returns 1 if the padding is valid and
 * -1 otherwise. It also removes any explicit IV from the start of the record
 * without leaking any timing about whether there was enough space after the
 * padding was removed.
 *
 * block_size: the block size of the cipher used to encrypt the record.
 * returns:
 *   0: (in non-constant time) if the record is publicly invalid.
 *   1: if the padding was valid
 *  -1: otherwise. */
int tls1_cbc_remove_padding(const SSL* s,
                            SSL3_RECORD *rec,
                            unsigned block_size,
                            unsigned mac_size)
    {
    unsigned padding_length, good, to_check, i;
    const unsigned overhead = 1 /* padding length byte */ + mac_size;
    /* Check if version requires explicit IV */
    if (s->version >= TLS1_1_VERSION || s->version == DTLS1_BAD_VER)
        {
        /* These lengths are all public so we can test them in
         * non-constant time.
         */
        if (overhead + block_size > rec->length)
            return 0;
        /* We can now safely skip explicit IV */
        rec->data += block_size;
        rec->input += block_size;
        rec->length -= block_size;
        }
    else if (overhead > rec->length)
        return 0;

    padding_length = rec->data[rec->length-1];

    /* NB: if compression is in operation the first packet may not be of
     * even length so the padding bug check cannot be performed. This bug
     * workaround has been around since SSLeay so hopefully it is either
     * fixed now or no buggy implementation supports compression [steve]
     */
    if ( (s->options&SSL_OP_TLS_BLOCK_PADDING_BUG) && !s->expand)
        {
        /* First packet is even in size, so check */
        if ((memcmp(s->s3->read_sequence, "\0\0\0\0\0\0\0\0",8) == 0) &&
            !(padding_length & 1))
            {
            s->s3->flags|=TLS1_FLAGS_TLS_PADDING_BUG;
            }
        if ((s->s3->flags & TLS1_FLAGS_TLS_PADDING_BUG) &&
            padding_length > 0)
            {
            padding_length--;
            }
        }

    if (EVP_CIPHER_flags(s->enc_read_ctx->cipher)&EVP_CIPH_FLAG_AEAD_CIPHER)
        {
        /* padding is already verified */
        rec->length -= padding_length + 1;
        return 1;
        }

    good = constant_time_ge(rec->length, overhead+padding_length);
    /* The padding consists of a length byte at the end of the record and
     * then that many bytes of padding, all with the same value as the
     * length byte. Thus, with the length byte included, there are
     * |padding_length|+1 bytes of padding.
     *
     * We can't check just |padding_length+1| bytes because that leaks
     * decrypted information. Therefore we always have to check the maximum
     * amount of padding possible. (Again, the length of the record is
     * public information so we can use it.) */
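    /* For example, if |padding_length| is 3, constant_time_ge_8(3, i) in the
     * loop below is 0xff for i = 0..3 and 0x00 for larger i, so only the
     * last four bytes of the record (the length byte and three padding
     * bytes) can clear bits in |good|; the remaining iterations read bytes
     * but cannot affect the result. */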
    to_check = 255; /* maximum amount of padding. */
    if (to_check > rec->length-1)
        to_check = rec->length-1;

    for (i = 0; i < to_check; i++)
        {
        unsigned char mask = constant_time_ge_8(padding_length, i);
        unsigned char b = rec->data[rec->length-1-i];
        /* The final |padding_length+1| bytes should all have the value
         * |padding_length|. Therefore the XOR should be zero. */
        good &= ~(mask&(padding_length ^ b));
        }

    /* If any of the final |padding_length+1| bytes had the wrong value,
     * one or more of the lower eight bits of |good| will be cleared.
     */
    good = constant_time_eq(0xff, good & 0xff);
    padding_length = good & (padding_length+1);
    rec->length -= padding_length;
    rec->type |= padding_length<<8;    /* kludge: pass padding length */

    return constant_time_select_int(good, 1, -1);
    }

/* ssl3_cbc_copy_mac copies |md_size| bytes from the end of |rec| to |out| in
 * constant time (independent of the concrete value of rec->length, which may
 * vary within a 256-byte window).
 *
 * ssl3_cbc_remove_padding or tls1_cbc_remove_padding must be called prior to
 * this function.
 *
 * On entry:
 *   rec->orig_len >= md_size
 *   md_size <= EVP_MAX_MD_SIZE
 *
 * If CBC_MAC_ROTATE_IN_PLACE is defined then the rotation is performed with
 * variable accesses in a 64-byte-aligned buffer. Assuming that this fits into
 * a single or pair of cache-lines, then the variable memory accesses don't
 * actually affect the timing. CPUs with smaller cache-lines [if any] are
 * not multi-core and are not considered vulnerable to cache-timing attacks.
 */
#define CBC_MAC_ROTATE_IN_PLACE

void ssl3_cbc_copy_mac(unsigned char* out,
                       const SSL3_RECORD *rec,
                       unsigned md_size,unsigned orig_len)
    {
#if defined(CBC_MAC_ROTATE_IN_PLACE)
    unsigned char rotated_mac_buf[64+EVP_MAX_MD_SIZE];
    unsigned char *rotated_mac;
#else
    unsigned char rotated_mac[EVP_MAX_MD_SIZE];
#endif

    /* mac_end is the index of |rec->data| just after the end of the MAC. */
    unsigned mac_end = rec->length;
    unsigned mac_start = mac_end - md_size;
    /* scan_start contains the number of bytes that we can ignore because
     * the MAC's position can only vary by 255 bytes. */
    unsigned scan_start = 0;
    unsigned i, j;
    unsigned div_spoiler;
    unsigned rotate_offset;

    OPENSSL_assert(orig_len >= md_size);
    OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE);

#if defined(CBC_MAC_ROTATE_IN_PLACE)
    rotated_mac = rotated_mac_buf + ((0-(size_t)rotated_mac_buf)&63);
#endif

    /* This information is public so it's safe to branch based on it. */
    if (orig_len > md_size + 255 + 1)
        scan_start = orig_len - (md_size + 255 + 1);
    /* div_spoiler contains a multiple of md_size that is used to cause the
     * modulo operation to be constant time. Without this, the time varies
     * based on the amount of padding when running on Intel chips at least.
     *
     * The aim of right-shifting md_size is so that the compiler doesn't
     * figure out that it can remove div_spoiler as that would require it
     * to prove that md_size is always even, which I hope is beyond it. */
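    /* Because div_spoiler is a multiple of md_size, the modulo reduction
     * below leaves rotate_offset equal to (mac_start - scan_start) % md_size:
     * the index within |rotated_mac| at which the gathering loop deposits the
     * first MAC byte. Reading md_size bytes starting at that index (and
     * wrapping around) therefore yields the MAC in its original order. */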
    div_spoiler = md_size >> 1;
    div_spoiler <<= (sizeof(div_spoiler)-1)*8;
    rotate_offset = (div_spoiler + mac_start - scan_start) % md_size;

    memset(rotated_mac, 0, md_size);
    for (i = scan_start, j = 0; i < orig_len; i++)
        {
        unsigned char mac_started = constant_time_ge_8(i, mac_start);
        unsigned char mac_ended = constant_time_ge_8(i, mac_end);
        unsigned char b = rec->data[i];
        rotated_mac[j++] |= b & mac_started & ~mac_ended;
        j &= constant_time_lt(j,md_size);
        }

    /* Now rotate the MAC */
#if defined(CBC_MAC_ROTATE_IN_PLACE)
    j = 0;
    for (i = 0; i < md_size; i++)
        {
        /* in case cache-line is 32 bytes, touch second line */
        ((volatile unsigned char *)rotated_mac)[rotate_offset^32];
        out[j++] = rotated_mac[rotate_offset++];
        rotate_offset &= constant_time_lt(rotate_offset,md_size);
        }
#else
    memset(out, 0, md_size);
    rotate_offset = md_size - rotate_offset;
    rotate_offset &= constant_time_lt(rotate_offset,md_size);
    for (i = 0; i < md_size; i++)
        {
        for (j = 0; j < md_size; j++)
            out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset);
        rotate_offset++;
        rotate_offset &= constant_time_lt(rotate_offset,md_size);
        }
#endif
    }

/* u32toLE serialises an unsigned, 32-bit number (n) as four bytes at (p) in
 * little-endian order. The value of p is advanced by four. */
#define u32toLE(n, p) \
    (*((p)++)=(unsigned char)(n), \
     *((p)++)=(unsigned char)(n>>8), \
     *((p)++)=(unsigned char)(n>>16), \
     *((p)++)=(unsigned char)(n>>24))

/* These functions serialize the state of a hash and thus perform the standard
 * "final" operation without adding the padding and length that such a function
 * typically does. */
static void tls1_md5_final_raw(void* ctx, unsigned char *md_out)
    {
    MD5_CTX *md5 = ctx;
    u32toLE(md5->A, md_out);
    u32toLE(md5->B, md_out);
    u32toLE(md5->C, md_out);
    u32toLE(md5->D, md_out);
    }

static void tls1_sha1_final_raw(void* ctx, unsigned char *md_out)
    {
    SHA_CTX *sha1 = ctx;
    l2n(sha1->h0, md_out);
    l2n(sha1->h1, md_out);
    l2n(sha1->h2, md_out);
    l2n(sha1->h3, md_out);
    l2n(sha1->h4, md_out);
    }
#define LARGEST_DIGEST_CTX SHA_CTX

#ifndef OPENSSL_NO_SHA256
static void tls1_sha256_final_raw(void* ctx, unsigned char *md_out)
    {
    SHA256_CTX *sha256 = ctx;
    unsigned i;

    for (i = 0; i < 8; i++)
        {
        l2n(sha256->h[i], md_out);
        }
    }
#undef LARGEST_DIGEST_CTX
#define LARGEST_DIGEST_CTX SHA256_CTX
#endif

#ifndef OPENSSL_NO_SHA512
static void tls1_sha512_final_raw(void* ctx, unsigned char *md_out)
    {
    SHA512_CTX *sha512 = ctx;
    unsigned i;

    for (i = 0; i < 8; i++)
        {
        l2n8(sha512->h[i], md_out);
        }
    }
#undef LARGEST_DIGEST_CTX
#define LARGEST_DIGEST_CTX SHA512_CTX
#endif

/* ssl3_cbc_record_digest_supported returns 1 iff |ctx| uses a hash function
 * which ssl3_cbc_digest_record supports. */
char ssl3_cbc_record_digest_supported(const EVP_MD_CTX *ctx)
    {
#ifdef OPENSSL_FIPS
    if (FIPS_mode())
        return 0;
#endif
    switch (EVP_MD_CTX_type(ctx))
        {
        case NID_md5:
        case NID_sha1:
#ifndef OPENSSL_NO_SHA256
        case NID_sha224:
        case NID_sha256:
#endif
#ifndef OPENSSL_NO_SHA512
        case NID_sha384:
        case NID_sha512:
#endif
            return 1;
        default:
            return 0;
        }
    }

/* ssl3_cbc_digest_record computes the MAC of a decrypted, padded SSLv3/TLS
 * record.
 *
 * ctx: the EVP_MD_CTX from which we take the hash function.
 *   ssl3_cbc_record_digest_supported must return true for this EVP_MD_CTX.
 * md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written.
 * md_out_size: if non-NULL, the number of output bytes is written here.
 * header: the 13-byte, TLS record header.
 * data: the record data itself, less any preceding explicit IV.
 * data_plus_mac_size: the secret, reported length of the data and MAC
 *   once the padding has been removed.
 * data_plus_mac_plus_padding_size: the public length of the whole
 *   record, including padding.
 * is_sslv3: non-zero if we are to use SSLv3. Otherwise, TLS.
 *
 * On entry: by virtue of having been through one of the remove_padding
 * functions, above, we know that data_plus_mac_size is large enough to contain
 * a padding byte and MAC. (If the padding was invalid, it might contain the
 * padding too.) */
void ssl3_cbc_digest_record(
    const EVP_MD_CTX *ctx,
    unsigned char* md_out,
    size_t* md_out_size,
    const unsigned char header[13],
    const unsigned char *data,
    size_t data_plus_mac_size,
    size_t data_plus_mac_plus_padding_size,
    const unsigned char *mac_secret,
    unsigned mac_secret_length,
    char is_sslv3)
    {
    union { double align;
            unsigned char c[sizeof(LARGEST_DIGEST_CTX)]; } md_state;
    void (*md_final_raw)(void *ctx, unsigned char *md_out);
    void (*md_transform)(void *ctx, const unsigned char *block);
    unsigned md_size, md_block_size = 64;
    unsigned sslv3_pad_length = 40, header_length, variance_blocks,
             len, max_mac_bytes, num_blocks,
             num_starting_blocks, k, mac_end_offset, c, index_a, index_b;
    unsigned int bits;    /* at most 18 bits */
    unsigned char length_bytes[MAX_HASH_BIT_COUNT_BYTES];
    /* hmac_pad is the masked HMAC key. */
    unsigned char hmac_pad[MAX_HASH_BLOCK_SIZE];
    unsigned char first_block[MAX_HASH_BLOCK_SIZE];
    unsigned char mac_out[EVP_MAX_MD_SIZE];
    unsigned i, j, md_out_size_u;
    EVP_MD_CTX md_ctx;
    /* md_length_size is the number of bytes in the length field that
     * terminates the hash. */
    unsigned md_length_size = 8;
    char length_is_big_endian = 1;
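    /* To make the two size arguments concrete (illustrative example only): a
     * TLS record carrying 449 bytes of application data, a 20-byte SHA-1 MAC
     * and 27 bytes of CBC padding (26 padding bytes plus the padding length
     * byte) has data_plus_mac_size = 469 and
     * data_plus_mac_plus_padding_size = 496; only the latter may be treated
     * as public. */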
    /* This is a, hopefully redundant, check that allows us to forget about
     * many possible overflows later in this function. */
    OPENSSL_assert(data_plus_mac_plus_padding_size < 1024*1024);

    switch (EVP_MD_CTX_type(ctx))
        {
        case NID_md5:
            MD5_Init((MD5_CTX*)md_state.c);
            md_final_raw = tls1_md5_final_raw;
            md_transform = (void(*)(void *ctx, const unsigned char *block)) MD5_Transform;
            md_size = 16;
            sslv3_pad_length = 48;
            length_is_big_endian = 0;
            break;
        case NID_sha1:
            SHA1_Init((SHA_CTX*)md_state.c);
            md_final_raw = tls1_sha1_final_raw;
            md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA1_Transform;
            md_size = 20;
            break;
#ifndef OPENSSL_NO_SHA256
        case NID_sha224:
            SHA224_Init((SHA256_CTX*)md_state.c);
            md_final_raw = tls1_sha256_final_raw;
            md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform;
            md_size = 224/8;
            break;
        case NID_sha256:
            SHA256_Init((SHA256_CTX*)md_state.c);
            md_final_raw = tls1_sha256_final_raw;
            md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform;
            md_size = 32;
            break;
#endif
#ifndef OPENSSL_NO_SHA512
        case NID_sha384:
            SHA384_Init((SHA512_CTX*)md_state.c);
            md_final_raw = tls1_sha512_final_raw;
            md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform;
            md_size = 384/8;
            md_block_size = 128;
            md_length_size = 16;
            break;
        case NID_sha512:
            SHA512_Init((SHA512_CTX*)md_state.c);
            md_final_raw = tls1_sha512_final_raw;
            md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform;
            md_size = 64;
            md_block_size = 128;
            md_length_size = 16;
            break;
#endif
        default:
            /* ssl3_cbc_record_digest_supported should have been
             * called first to check that the hash function is
             * supported. */
            OPENSSL_assert(0);
            if (md_out_size)
                *md_out_size = -1;
            return;
        }

    OPENSSL_assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES);
    OPENSSL_assert(md_block_size <= MAX_HASH_BLOCK_SIZE);
    OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE);

    header_length = 13;
    if (is_sslv3)
        {
        header_length =
            mac_secret_length +
            sslv3_pad_length +
            8 /* sequence number */ +
            1 /* record type */ +
            2 /* record length */;
        }

    /* variance_blocks is the number of blocks of the hash that we have to
     * calculate in constant time because they could be altered by the
     * padding value.
     *
     * In SSLv3, the padding must be minimal so the end of the plaintext
     * varies by, at most, 15+20 = 35 bytes. (We conservatively assume that
     * the MAC size varies from 0..20 bytes.) In case the 9 bytes of hash
     * termination (0x80 + 64-bit length) don't fit in the final block, we
     * say that the final two blocks can vary based on the padding.
     *
     * TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
     * required to be minimal. Therefore we say that the final six blocks
     * can vary based on the padding.
     *
     * Later in the function, if the message is short and there obviously
     * cannot be this many blocks then variance_blocks can be reduced. */
    variance_blocks = is_sslv3 ? 2 : 6;
    /* From now on we're dealing with the MAC, which conceptually has 13
     * bytes of `header' before the start of the data (TLS) or 71/75 bytes
     * (SSLv3) */
    len = data_plus_mac_plus_padding_size + header_length;
    /* max_mac_bytes contains the maximum number of bytes in the MAC,
     * including |header|, assuming that there's no padding. */
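    /* Continuing the illustrative example above (TLS with SHA-1, so
     * md_size = 20, md_block_size = 64, md_length_size = 8 and
     * header_length = 13): len = 496 + 13 = 509,
     * max_mac_bytes = 509 - 20 - 1 = 488 and
     * num_blocks = (488 + 1 + 8 + 63) / 64 = 8; the first
     * num_starting_blocks = 8 - 6 = 2 blocks can be hashed immediately and
     * the remainder are handled by the constant-time loop further below. */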
    max_mac_bytes = len - md_size - 1;
    /* num_blocks is the maximum number of hash blocks. */
    num_blocks = (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size;
    /* In order to calculate the MAC in constant time we have to handle
     * the final blocks specially because the padding value could cause the
     * end to appear somewhere in the final |variance_blocks| blocks and we
     * can't leak where. However, |num_starting_blocks| worth of data can
     * be hashed right away because no padding value can affect whether
     * they are plaintext. */
    num_starting_blocks = 0;
    /* k is the starting byte offset into the conceptual header||data where
     * we start processing. */
    k = 0;
    /* mac_end_offset is the index just past the end of the data to be
     * MACed. */
    mac_end_offset = data_plus_mac_size + header_length - md_size;
    /* c is the index of the 0x80 byte in the final hash block that
     * contains application data. */
    c = mac_end_offset % md_block_size;
    /* index_a is the hash block number that contains the 0x80 terminating
     * value. */
    index_a = mac_end_offset / md_block_size;
    /* index_b is the hash block number that contains the 64-bit hash
     * length, in bits. */
    index_b = (mac_end_offset + md_length_size) / md_block_size;
    /* bits is the hash-length in bits. It includes the additional hash
     * block for the masked HMAC key, or the whole of |header| in the case
     * of SSLv3. */

    /* For SSLv3, if we're going to have any starting blocks then we need
     * at least two because the header is larger than a single block. */
    if (num_blocks > variance_blocks + (is_sslv3 ? 1 : 0))
        {
        num_starting_blocks = num_blocks - variance_blocks;
        k = md_block_size*num_starting_blocks;
        }

    bits = 8*mac_end_offset;
    if (!is_sslv3)
        {
        /* Compute the initial HMAC block. For SSLv3, the padding and
         * secret bytes are included in |header| because they take more
         * than a single block. */
        bits += 8*md_block_size;
        memset(hmac_pad, 0, md_block_size);
        OPENSSL_assert(mac_secret_length <= sizeof(hmac_pad));
        memcpy(hmac_pad, mac_secret, mac_secret_length);
        for (i = 0; i < md_block_size; i++)
            hmac_pad[i] ^= 0x36;

        md_transform(md_state.c, hmac_pad);
        }

    if (length_is_big_endian)
        {
        memset(length_bytes,0,md_length_size-4);
        length_bytes[md_length_size-4] = (unsigned char)(bits>>24);
        length_bytes[md_length_size-3] = (unsigned char)(bits>>16);
        length_bytes[md_length_size-2] = (unsigned char)(bits>>8);
        length_bytes[md_length_size-1] = (unsigned char)bits;
        }
    else
        {
        memset(length_bytes,0,md_length_size);
        length_bytes[md_length_size-5] = (unsigned char)(bits>>24);
        length_bytes[md_length_size-6] = (unsigned char)(bits>>16);
        length_bytes[md_length_size-7] = (unsigned char)(bits>>8);
        length_bytes[md_length_size-8] = (unsigned char)bits;
        }

    if (k > 0)
        {
        if (is_sslv3)
            {
            /* The SSLv3 header is larger than a single block.
             * overhang is the number of bytes beyond a single
             * block that the header consumes: either 7 bytes
             * (SHA1) or 11 bytes (MD5). */
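            /* The arithmetic behind those two figures: for SHA-1 the SSLv3
             * header is 20 + 40 + 8 + 1 + 2 = 71 bytes, giving an overhang
             * of 71 - 64 = 7; for MD5 it is 16 + 48 + 8 + 1 + 2 = 75 bytes,
             * giving an overhang of 11. */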
            unsigned overhang = header_length-md_block_size;
            md_transform(md_state.c, header);
            memcpy(first_block, header + md_block_size, overhang);
            memcpy(first_block + overhang, data, md_block_size-overhang);
            md_transform(md_state.c, first_block);
            for (i = 1; i < k/md_block_size - 1; i++)
                md_transform(md_state.c, data + md_block_size*i - overhang);
            }
        else
            {
            /* k is a multiple of md_block_size. */
            memcpy(first_block, header, 13);
            memcpy(first_block+13, data, md_block_size-13);
            md_transform(md_state.c, first_block);
            for (i = 1; i < k/md_block_size; i++)
                md_transform(md_state.c, data + md_block_size*i - 13);
            }
        }

    memset(mac_out, 0, sizeof(mac_out));

    /* We now process the final hash blocks. For each block, we construct
     * it in constant time. If |i == index_a| then we'll include the 0x80
     * byte and zero pad etc. For each block we selectively copy it, in
     * constant time, to |mac_out|. */
    for (i = num_starting_blocks; i <= num_starting_blocks+variance_blocks; i++)
        {
        unsigned char block[MAX_HASH_BLOCK_SIZE];
        unsigned char is_block_a = constant_time_eq_8(i, index_a);
        unsigned char is_block_b = constant_time_eq_8(i, index_b);
        for (j = 0; j < md_block_size; j++)
            {
            unsigned char b = 0, is_past_c, is_past_cp1;
            if (k < header_length)
                b = header[k];
            else if (k < data_plus_mac_plus_padding_size + header_length)
                b = data[k-header_length];
            k++;

            is_past_c = is_block_a & constant_time_ge_8(j, c);
            is_past_cp1 = is_block_a & constant_time_ge_8(j, c+1);
            /* If this is the block containing the end of the
             * application data, and we are at the offset for the
             * 0x80 value, then overwrite b with 0x80. */
            b = constant_time_select_8(is_past_c, 0x80, b);
            /* If this is the block containing the end of the
             * application data and we're past the 0x80 value then
             * just write zero. */
            b = b&~is_past_cp1;
            /* If this is index_b (the final block), but not
             * index_a (the end of the data), then the 64-bit
             * length didn't fit into index_a and we're having to
             * add an extra block of zeros. */
            b &= ~is_block_b | is_block_a;

            /* The final bytes of one of the blocks contains the
             * length. */
            if (j >= md_block_size - md_length_size)
                {
                /* If this is index_b, write a length byte. */
                b = constant_time_select_8(
                    is_block_b, length_bytes[j-(md_block_size-md_length_size)], b);
                }
            block[j] = b;
            }

        md_transform(md_state.c, block);
        md_final_raw(md_state.c, block);
        /* If this is index_b, copy the hash value to |mac_out|. */
        for (j = 0; j < md_size; j++)
            mac_out[j] |= block[j]&is_block_b;
        }

    EVP_MD_CTX_init(&md_ctx);
    EVP_DigestInit_ex(&md_ctx, ctx->digest, NULL /* engine */);
    if (is_sslv3)
        {
        /* We repurpose |hmac_pad| to contain the SSLv3 pad2 block. */
        memset(hmac_pad, 0x5c, sslv3_pad_length);

        EVP_DigestUpdate(&md_ctx, mac_secret, mac_secret_length);
        EVP_DigestUpdate(&md_ctx, hmac_pad, sslv3_pad_length);
        EVP_DigestUpdate(&md_ctx, mac_out, md_size);
        }
    else
        {
        /* Complete the HMAC in the standard manner. */
        for (i = 0; i < md_block_size; i++)
            hmac_pad[i] ^= 0x6a;

        EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size);
        EVP_DigestUpdate(&md_ctx, mac_out, md_size);
        }
    EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u);
    if (md_out_size)
        *md_out_size = md_out_size_u;
    EVP_MD_CTX_cleanup(&md_ctx);
    }

#ifdef OPENSSL_FIPS

/* Due to the need to use EVP in FIPS mode we can't reimplement digests but
 * we can ensure the number of blocks processed is equal for all cases
 * by digesting additional data.
 */

void tls_fips_digest_extra(
    const EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *mac_ctx,
    const unsigned char *data, size_t data_len, size_t orig_len)
    {
    size_t block_size, digest_pad, blocks_data, blocks_orig;
    if (EVP_CIPHER_CTX_mode(cipher_ctx) != EVP_CIPH_CBC_MODE)
        return;
    block_size = EVP_MD_CTX_block_size(mac_ctx);
    /* We are in FIPS mode if we get this far so we know we have only SHA*
     * digests and TLS to deal with.
     * Minimum digest padding length is 17 for SHA384/SHA512 and 9
     * otherwise.
     * Additional header is 13 bytes. To get the number of digest blocks
     * processed round up the amount of data plus padding to the nearest
     * block length. Block length is 128 for SHA384/SHA512 and 64 otherwise.
     * So we have:
     * blocks = (payload_len + digest_pad + 13 + block_size - 1)/block_size
     * equivalently:
     * blocks = (payload_len + digest_pad + 12)/block_size + 1
     * HMAC adds a constant overhead.
     * We're ultimately only interested in differences so this becomes
     * blocks = (payload_len + 29)/128
     * for SHA384/SHA512 and
     * blocks = (payload_len + 21)/64
     * otherwise.
     */
    digest_pad = block_size == 64 ? 21 : 29;
    blocks_orig = (orig_len + digest_pad)/block_size;
    blocks_data = (data_len + digest_pad)/block_size;
    /* MAC enough blocks to make up the difference between the original
     * and actual lengths plus one extra block to ensure this is never a
     * no op. The "data" pointer should always have enough space to
     * perform this operation as it is large enough for a maximum
     * length TLS buffer.
     */
    EVP_DigestSignUpdate(mac_ctx, data,
        (blocks_orig - blocks_data + 1) * block_size);
    }
#endif