/*-
 * Copyright (c) 2017 W. Dean Freeman
 * Copyright (c) 2013-2015 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This implementation of Fortuna is based on the descriptions found in
 * ISBN 978-0-470-47424-2 "Cryptography Engineering" by Ferguson, Schneier
 * and Kohno ("FS&K").
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/limits.h>

#ifdef _KERNEL
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#else /* !_KERNEL */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>

#include "unit_test.h"
#endif /* _KERNEL */

#include <crypto/chacha20/chacha.h>
#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>

#include <dev/random/hash.h>
#include <dev/random/randomdev.h>
#ifdef _KERNEL
#include <dev/random/random_harvestq.h>
#endif
#include <dev/random/uint128.h>
#include <dev/random/fortuna.h>

/* Defined in FS&K */
#define RANDOM_FORTUNA_NPOOLS 32		/* The number of accumulation pools */
#define RANDOM_FORTUNA_DEFPOOLSIZE 64		/* The default pool size/length for a (re)seed */
#define RANDOM_FORTUNA_MAX_READ (1 << 20)	/* Max bytes from AES before rekeying */
#define RANDOM_FORTUNA_BLOCKS_PER_KEY (1 << 16)	/* Max blocks from AES before rekeying */
CTASSERT(RANDOM_FORTUNA_BLOCKS_PER_KEY * RANDOM_BLOCKSIZE ==
    RANDOM_FORTUNA_MAX_READ);
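
/*
 * With RANDOM_BLOCKSIZE being the 16-byte AES block size (see the CTASSERTs
 * below), 2^16 blocks per key works out to exactly the 1MiB
 * RANDOM_FORTUNA_MAX_READ limit above.
 */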

/*
 * The allowable range of RANDOM_FORTUNA_DEFPOOLSIZE. The default value is above.
 * Making RANDOM_FORTUNA_DEFPOOLSIZE too large will mean a long time between
 * reseeds, while making it too small may compromise initial security in
 * exchange for faster reseeds.
 */
#define RANDOM_FORTUNA_MINPOOLSIZE 16
#define RANDOM_FORTUNA_MAXPOOLSIZE INT_MAX
CTASSERT(RANDOM_FORTUNA_MINPOOLSIZE <= RANDOM_FORTUNA_DEFPOOLSIZE);
CTASSERT(RANDOM_FORTUNA_DEFPOOLSIZE <= RANDOM_FORTUNA_MAXPOOLSIZE);

/* This algorithm (and code) presumes that RANDOM_KEYSIZE is twice as large as RANDOM_BLOCKSIZE */
CTASSERT(RANDOM_BLOCKSIZE == sizeof(uint128_t));
CTASSERT(RANDOM_KEYSIZE == 2*RANDOM_BLOCKSIZE);

/* Probes for dtrace(1) */
#ifdef _KERNEL
SDT_PROVIDER_DECLARE(random);
SDT_PROVIDER_DEFINE(random);
SDT_PROBE_DEFINE2(random, fortuna, event_processor, debug, "u_int", "struct fs_pool *");
#endif /* _KERNEL */

/*
 * This is the beastie that needs protecting. It contains all of the
 * state that we are excited about. Exactly one is instantiated.
 */
static struct fortuna_state {
        struct fs_pool {                /* P_i */
                u_int fsp_length;       /* Only the first one is used by Fortuna */
                struct randomdev_hash fsp_hash;
        } fs_pool[RANDOM_FORTUNA_NPOOLS];
        u_int fs_reseedcount;           /* ReseedCnt */
        uint128_t fs_counter;           /* C */
        union randomdev_key fs_key;     /* K */
        u_int fs_minpoolsize;           /* Extras */
        /* Extras for the OS */
#ifdef _KERNEL
        /* For use when 'pacing' the reseeds */
        sbintime_t fs_lasttime;
#endif
        /* Reseed lock */
        mtx_t fs_mtx;
} fortuna_state;

/*
 * Experimental concurrent reads feature. Disabled by default for now, but it
 * may be enabled by default in the future.
 *
 * The benefit is improved concurrency in Fortuna. That is reflected in two
 * related aspects:
 *
 * 1. Concurrent devrandom readers can achieve similar throughput to a single
 *    reader thread.
 *
 * 2. The rand_harvestq process spends much less time spinning when one or more
 *    readers are processing a large request. This is partially due to the
 *    rand_harvestq / ra_event_processor design, which only passes one event at
 *    a time to the underlying algorithm. Each time, Fortuna must take its
 *    global state mutex, potentially blocking on a reader. Our adaptive
 *    mutexes assume that a lock holder currently on CPU will release the lock
 *    quickly, and spin if the owning thread is currently running.
 */
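/*
 * Exposed in the kernel as the read-only kern.random.fortuna.concurrent_read
 * tunable; see random_fortuna_init_alg() below.
 */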
static bool fortuna_concurrent_read __read_frequently = false;

#ifdef _KERNEL
static struct sysctl_ctx_list random_clist;
RANDOM_CHECK_UINT(fs_minpoolsize, RANDOM_FORTUNA_MINPOOLSIZE, RANDOM_FORTUNA_MAXPOOLSIZE);
#else
static uint8_t zero_region[RANDOM_ZERO_BLOCKSIZE];
#endif

static void random_fortuna_pre_read(void);
static void random_fortuna_read(uint8_t *, size_t);
static bool random_fortuna_seeded(void);
static bool random_fortuna_seeded_internal(void);
static void random_fortuna_process_event(struct harvest_event *);
static void random_fortuna_init_alg(void *);
static void random_fortuna_deinit_alg(void *);

static void random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount);
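
/*
 * The random(4) device framework drives Fortuna through the entry points
 * collected in this algorithm context.
 */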
struct random_algorithm random_alg_context = {
        .ra_ident = "Fortuna",
        .ra_init_alg = random_fortuna_init_alg,
        .ra_deinit_alg = random_fortuna_deinit_alg,
        .ra_pre_read = random_fortuna_pre_read,
        .ra_read = random_fortuna_read,
        .ra_seeded = random_fortuna_seeded,
        .ra_event_processor = random_fortuna_process_event,
        .ra_poolcount = RANDOM_FORTUNA_NPOOLS,
};

/* ARGSUSED */
static void
random_fortuna_init_alg(void *unused __unused)
{
        int i;
#ifdef _KERNEL
        struct sysctl_oid *random_fortuna_o;
#endif

        RANDOM_RESEED_INIT_LOCK();
        /*
         * Fortuna parameters. Do not adjust these unless you have a very
         * good clue about what they do!
         */
        fortuna_state.fs_minpoolsize = RANDOM_FORTUNA_DEFPOOLSIZE;
#ifdef _KERNEL
        fortuna_state.fs_lasttime = 0;
        random_fortuna_o = SYSCTL_ADD_NODE(&random_clist,
            SYSCTL_STATIC_CHILDREN(_kern_random),
            OID_AUTO, "fortuna", CTLFLAG_RW, 0,
            "Fortuna Parameters");
        SYSCTL_ADD_PROC(&random_clist,
            SYSCTL_CHILDREN(random_fortuna_o), OID_AUTO,
            "minpoolsize", CTLTYPE_UINT | CTLFLAG_RWTUN,
            &fortuna_state.fs_minpoolsize, RANDOM_FORTUNA_DEFPOOLSIZE,
            random_check_uint_fs_minpoolsize, "IU",
            "Minimum pool size necessary to cause a reseed");
        KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0 at startup"));

        SYSCTL_ADD_BOOL(&random_clist, SYSCTL_CHILDREN(random_fortuna_o),
            OID_AUTO, "concurrent_read", CTLFLAG_RDTUN,
            &fortuna_concurrent_read, 0, "If non-zero, enable EXPERIMENTAL "
            "feature to improve concurrent Fortuna performance.");
#endif

        /*-
         * FS&K - InitializePRNG()
         *      - P_i = \epsilon
         *      - ReseedCNT = 0
         */
        for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
                randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
                fortuna_state.fs_pool[i].fsp_length = 0;
        }
        fortuna_state.fs_reseedcount = 0;
        /*-
         * FS&K - InitializeGenerator()
         *      - C = 0
         *      - K = 0
         */
        fortuna_state.fs_counter = UINT128_ZERO;
        explicit_bzero(&fortuna_state.fs_key, sizeof(fortuna_state.fs_key));
}

/* ARGSUSED */
static void
random_fortuna_deinit_alg(void *unused __unused)
{

        RANDOM_RESEED_DEINIT_LOCK();
        explicit_bzero(&fortuna_state, sizeof(fortuna_state));
#ifdef _KERNEL
        sysctl_ctx_free(&random_clist);
#endif
}

/*-
 * FS&K - AddRandomEvent()
 * Process a single stochastic event off the harvest queue
 */
static void
random_fortuna_process_event(struct harvest_event *event)
{
        u_int pl;

        RANDOM_RESEED_LOCK();
        /*-
         * FS&K - P_i = P_i|<harvested stuff>
         * Accumulate the event into the appropriate pool
         * where each event carries the destination information.
         *
         * The hash_init() and hash_finish() calls are done in
         * random_fortuna_pre_read().
         *
         * We must be locked against pool state modification which can happen
         * during accumulation/reseeding and reading/regating.
         */
        pl = event->he_destination % RANDOM_FORTUNA_NPOOLS;
        /*
         * We ignore low entropy static/counter fields towards the end of the
         * he_event structure in order to increase measurable entropy when
         * conducting SP800-90B entropy analysis measurements of seed material
         * fed into the PRNG.
         * -- wdf
         */
        KASSERT(event->he_size <= sizeof(event->he_entropy),
            ("%s: event->he_size: %hhu > sizeof(event->he_entropy): %zu\n",
            __func__, event->he_size, sizeof(event->he_entropy)));
        randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
            &event->he_somecounter, sizeof(event->he_somecounter));
        randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
            event->he_entropy, event->he_size);

        /*-
         * Don't wrap the length. This is a "saturating" add.
         * XXX: FIX!!: We don't actually need lengths for anything but fs_pool[0],
         * but it's been useful debugging to see them all.
         */
        fortuna_state.fs_pool[pl].fsp_length = MIN(RANDOM_FORTUNA_MAXPOOLSIZE,
            fortuna_state.fs_pool[pl].fsp_length +
            sizeof(event->he_somecounter) + event->he_size);
        RANDOM_RESEED_UNLOCK();
}

/*-
 * FS&K - Reseed()
 * This introduces new key material into the output generator.
 * Additionally, it increments the output generator's counter
 * variable C. When C > 0, the output generator is seeded and
 * will deliver output.
 * The entropy_data buffer passed is a very specific size; the
 * product of RANDOM_FORTUNA_NPOOLS and RANDOM_KEYSIZE.
 */
static void
random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount)
{
        struct randomdev_hash context;
        uint8_t hash[RANDOM_KEYSIZE];
        const void *keymaterial;
        size_t keysz;
        bool seeded;

        RANDOM_RESEED_ASSERT_LOCK_OWNED();

        seeded = random_fortuna_seeded_internal();
        if (seeded) {
                randomdev_getkey(&fortuna_state.fs_key, &keymaterial, &keysz);
                KASSERT(keysz == RANDOM_KEYSIZE, ("%s: key size %zu not %u",
                    __func__, keysz, (unsigned)RANDOM_KEYSIZE));
        }

        /*-
         * FS&K - K = Hd(K|s) where Hd(m) is H(H(0^512|m))
         *      - C = C + 1
         */
        randomdev_hash_init(&context);
        randomdev_hash_iterate(&context, zero_region, RANDOM_ZERO_BLOCKSIZE);
        if (seeded)
                randomdev_hash_iterate(&context, keymaterial, keysz);
        randomdev_hash_iterate(&context, entropy_data, RANDOM_KEYSIZE*blockcount);
        randomdev_hash_finish(&context, hash);
        randomdev_hash_init(&context);
        randomdev_hash_iterate(&context, hash, RANDOM_KEYSIZE);
        randomdev_hash_finish(&context, hash);
        randomdev_encrypt_init(&fortuna_state.fs_key, hash);
        explicit_bzero(hash, sizeof(hash));
        /* Unblock the device if this is the first time we are reseeding. */
        if (uint128_is_zero(fortuna_state.fs_counter))
                randomdev_unblock();
        uint128_increment(&fortuna_state.fs_counter);
}

/*-
 * FS&K - RandomData() (Part 1)
 * Used to return processed entropy from the PRNG. A pre_read function is
 * required to be present (but it can be a stub) in order to allow specific
 * actions at the beginning of the read.
 */
void
random_fortuna_pre_read(void)
{
#ifdef _KERNEL
        sbintime_t now;
#endif
        struct randomdev_hash context;
        uint32_t s[RANDOM_FORTUNA_NPOOLS*RANDOM_KEYSIZE_WORDS];
        uint8_t temp[RANDOM_KEYSIZE];
        u_int i;

        KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0"));
        RANDOM_RESEED_LOCK();
#ifdef _KERNEL
        /* FS&K - Use 'getsbinuptime()' to prevent reseed-spamming. */
        now = getsbinuptime();
#endif

        if (fortuna_state.fs_pool[0].fsp_length < fortuna_state.fs_minpoolsize
#ifdef _KERNEL
            /*
             * FS&K - Use 'getsbinuptime()' to prevent reseed-spamming, but do
             * not block initial seeding (fs_lasttime == 0).
             */
            || (__predict_true(fortuna_state.fs_lasttime != 0) &&
                now - fortuna_state.fs_lasttime <= SBT_1S/10)
#endif
        ) {
                RANDOM_RESEED_UNLOCK();
                return;
        }

#ifdef _KERNEL
        /*
         * When set, pretend we do not have enough entropy to reseed yet.
         */
        KFAIL_POINT_CODE(DEBUG_FP, random_fortuna_pre_read, {
                if (RETURN_VALUE != 0) {
                        RANDOM_RESEED_UNLOCK();
                        return;
                }
        });
#endif

#ifdef _KERNEL
        fortuna_state.fs_lasttime = now;
#endif

        /* FS&K - ReseedCNT = ReseedCNT + 1 */
        fortuna_state.fs_reseedcount++;
        /* s = \epsilon at start */
        for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
                /* FS&K - if Divides(ReseedCnt, 2^i) ... */
                if ((fortuna_state.fs_reseedcount % (1 << i)) == 0) {
                        /*-
                         * FS&K - temp = (P_i)
                         *      - P_i = \epsilon
                         *      - s = s|H(temp)
                         */
                        randomdev_hash_finish(&fortuna_state.fs_pool[i].fsp_hash, temp);
                        randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
                        fortuna_state.fs_pool[i].fsp_length = 0;
                        randomdev_hash_init(&context);
                        randomdev_hash_iterate(&context, temp, RANDOM_KEYSIZE);
                        randomdev_hash_finish(&context, s + i*RANDOM_KEYSIZE_WORDS);
                } else
                        break;
        }
#ifdef _KERNEL
        SDT_PROBE2(random, fortuna, event_processor, debug, fortuna_state.fs_reseedcount, fortuna_state.fs_pool);
#endif
        /* FS&K */
        random_fortuna_reseed_internal(s, i);
        RANDOM_RESEED_UNLOCK();

        /* Clean up and secure */
        explicit_bzero(s, sizeof(s));
        explicit_bzero(temp, sizeof(temp));
}

/*
 * This is basically GenerateBlocks() from FS&K.
 *
 * It differs in three ways:
 *
 * 1. Chacha20 is tolerant of non-block-multiple request sizes, so we do not
 *    need to handle any remainder bytes specially and can just pass the length
 *    directly to the PRF construction;
 *
 * 2. Chacha20 is a 512-bit block size cipher (whereas AES has 128-bit block
 *    size, regardless of key size). This means Chacha does not require
 *    re-keying every 1MiB. This is implied by the math in FS&K 9.4 and
 *    mentioned explicitly in the conclusion, "If we had a block cipher with a
 *    256-bit [or greater] block size, then the collisions would not have been
 *    an issue at all" (p. 144).
 *
 * 3. In conventional ("locked") mode, we produce a maximum of PAGE_SIZE output
 *    at a time before dropping the lock, to avoid bullying the lock too much.
 *    This has been the status quo since 2015 (r284959).
 *
 * The upstream caller random_fortuna_read is responsible for zeroing out
 * sensitive buffers provided as parameters to this routine.
 */
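/*
 * Symbolic names for the 'locked' argument to random_fortuna_genbytes(),
 * below.
 */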
enum {
        FORTUNA_UNLOCKED = false,
        FORTUNA_LOCKED = true
};
static void
random_fortuna_genbytes(uint8_t *buf, size_t bytecount,
    uint8_t newkey[static RANDOM_KEYSIZE], uint128_t *p_counter,
    union randomdev_key *p_key, bool locked)
{
        uint8_t remainder_buf[RANDOM_BLOCKSIZE];
        size_t chunk_size;

        if (locked)
                RANDOM_RESEED_ASSERT_LOCK_OWNED();
        else
                RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();

        /*
         * Easy case: don't have to worry about bullying the global mutex,
         * don't have to worry about rekeying Chacha; API is byte-oriented.
         */
        if (!locked && random_chachamode) {
                randomdev_keystream(p_key, p_counter, buf, bytecount);
                return;
        }

        if (locked) {
                /*
                 * While holding the global lock, limit PRF generation to
                 * mitigate, but not eliminate, bullying symptoms.
                 */
                chunk_size = PAGE_SIZE;
        } else {
                /*
                 * 128-bit block ciphers like AES must be re-keyed at 1MB
                 * intervals to avoid unacceptable statistical differentiation
                 * from true random data (FS&K 9.4, p. 143-144).
                 */
                MPASS(!random_chachamode);
                chunk_size = RANDOM_FORTUNA_MAX_READ;
        }

        chunk_size = MIN(bytecount, chunk_size);
        if (!random_chachamode)
                chunk_size = rounddown(chunk_size, RANDOM_BLOCKSIZE);

        while (bytecount >= chunk_size && chunk_size > 0) {
                randomdev_keystream(p_key, p_counter, buf, chunk_size);

                buf += chunk_size;
                bytecount -= chunk_size;

                /*
                 * We have to rekey if there is any data remaining to be
                 * generated, in two scenarios:
                 *
                 * locked: we need to rekey before we unlock and release the
                 * global state to another consumer; or
                 *
                 * unlocked: we need to rekey because we're in AES mode and are
                 * required to rekey at chunk_size==1MB. But we do not need to
                 * rekey during the last trailing <1MB chunk.
                 */
                if (bytecount > 0) {
                        if (locked || chunk_size == RANDOM_FORTUNA_MAX_READ) {
                                randomdev_keystream(p_key, p_counter, newkey,
                                    RANDOM_KEYSIZE);
                                randomdev_encrypt_init(p_key, newkey);
                        }

                        /*
                         * If we're holding the global lock, yield it briefly
                         * now.
                         */
                        if (locked) {
                                RANDOM_RESEED_UNLOCK();
                                RANDOM_RESEED_LOCK();
                        }

                        /*
                         * At the trailing end, scale down chunk_size from 1MB
                         * or PAGE_SIZE to all remaining full blocks (AES) or
                         * all remaining bytes (Chacha).
                         */
                        if (bytecount < chunk_size) {
                                if (random_chachamode)
                                        chunk_size = bytecount;
                                else if (bytecount >= RANDOM_BLOCKSIZE)
                                        chunk_size = rounddown(bytecount,
                                            RANDOM_BLOCKSIZE);
                                else
                                        break;
                        }
                }
        }

        /*
         * Generate any partial AES block remaining into a temporary buffer and
         * copy the desired substring out.
         */
        if (bytecount > 0) {
                MPASS(!random_chachamode);

                randomdev_keystream(p_key, p_counter, remainder_buf,
                    sizeof(remainder_buf));
        }

        /*
         * In locked mode, re-key the global K before dropping the lock; the
         * lock is not needed for the memcpy/bzero below.
         */
        if (locked) {
                randomdev_keystream(p_key, p_counter, newkey, RANDOM_KEYSIZE);
                randomdev_encrypt_init(p_key, newkey);
                RANDOM_RESEED_UNLOCK();
        }

        if (bytecount > 0) {
                memcpy(buf, remainder_buf, bytecount);
                explicit_bzero(remainder_buf, sizeof(remainder_buf));
        }
}

/*
 * Handle only "concurrency-enabled" Fortuna reads to simplify logic.
 *
 * Caller (random_fortuna_read) is responsible for zeroing out sensitive
 * buffers provided as parameters to this routine.
 */
static void
random_fortuna_read_concurrent(uint8_t *buf, size_t bytecount,
    uint8_t newkey[static RANDOM_KEYSIZE])
{
        union randomdev_key key_copy;
        uint128_t counter_copy;
        size_t blockcount;

        MPASS(fortuna_concurrent_read);

        /*
         * Compute the number of blocks required for the PRF request
         * ('delta C'). We will step the global counter 'C' by this number
         * under lock, and then actually consume the counter values outside
         * the lock.
         *
         * This ensures that contemporaneous but independent requests for
         * randomness receive distinct 'C' values and thus independent PRF
         * results.
         */
        if (random_chachamode) {
                blockcount = howmany(bytecount, CHACHA_BLOCKLEN);
        } else {
                blockcount = howmany(bytecount, RANDOM_BLOCKSIZE);

                /*
                 * Need to account for the additional blocks generated by
                 * rekeying when updating the global fs_counter.
                 */
                blockcount += RANDOM_KEYS_PER_BLOCK *
                    (blockcount / RANDOM_FORTUNA_BLOCKS_PER_KEY);
        }

        RANDOM_RESEED_LOCK();
        KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));
        /*
         * Technically, we only need mutual exclusion to update shared state
         * appropriately. Nothing about updating the shared internal state
         * requires that we perform most of the expensive cryptographic
         * keystream generation under the lock. (We still need to generate 256
         * bits of keystream to re-key between consumers.)
         *
         * Save the original counter and key values that will be used as the
         * PRF for this particular consumer.
         */
        memcpy(&counter_copy, &fortuna_state.fs_counter, sizeof(counter_copy));
        memcpy(&key_copy, &fortuna_state.fs_key, sizeof(key_copy));

        /*
         * Step the counter as if we had generated 'blockcount' blocks for this
         * consumer. I.e., ensure that the next consumer gets an independent
         * range of counter values once we drop the global lock.
         */
        uint128_add64(&fortuna_state.fs_counter, blockcount);

        /*
         * We still need to re-key the global 'K' between independent calls;
         * this is no different from conventional Fortuna. Note that
         * 'randomdev_keystream()' will step the fs_counter 'C' appropriately
         * for the blocks needed for the 'newkey'.
         *
         * (This is part of PseudoRandomData() in FS&K, 9.4.4.)
         */
        randomdev_keystream(&fortuna_state.fs_key, &fortuna_state.fs_counter,
            newkey, RANDOM_KEYSIZE);
        randomdev_encrypt_init(&fortuna_state.fs_key, newkey);

        /*
         * We have everything we need to generate a unique PRF for this
         * consumer without touching global state.
         */
        RANDOM_RESEED_UNLOCK();

        random_fortuna_genbytes(buf, bytecount, newkey, &counter_copy,
            &key_copy, FORTUNA_UNLOCKED);
        RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();

        explicit_bzero(&counter_copy, sizeof(counter_copy));
        explicit_bzero(&key_copy, sizeof(key_copy));
}

/*-
 * FS&K - RandomData() (Part 2)
 * Main read from Fortuna, continued. May be called multiple times after
 * the random_fortuna_pre_read() above.
 *
 * The supplied buf MAY not be a multiple of RANDOM_BLOCKSIZE in size; it is
 * the responsibility of the algorithm to accommodate partial block reads, if a
 * block output mode is used.
 */
void
random_fortuna_read(uint8_t *buf, size_t bytecount)
{
        uint8_t newkey[RANDOM_KEYSIZE];

        if (fortuna_concurrent_read) {
                random_fortuna_read_concurrent(buf, bytecount, newkey);
                goto out;
        }

        RANDOM_RESEED_LOCK();
        KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));

        random_fortuna_genbytes(buf, bytecount, newkey,
            &fortuna_state.fs_counter, &fortuna_state.fs_key, FORTUNA_LOCKED);
        /* Returns unlocked */
        RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();

out:
        explicit_bzero(newkey, sizeof(newkey));
}

#ifdef _KERNEL
static bool block_seeded_status = false;
SYSCTL_BOOL(_kern_random, OID_AUTO, block_seeded_status, CTLFLAG_RWTUN,
    &block_seeded_status, 0,
    "If non-zero, pretend Fortuna is in an unseeded state. By setting "
    "this as a tunable, boot can be tested as if the random device is "
    "unavailable.");
#endif

static bool
random_fortuna_seeded_internal(void)
{
        return (!uint128_is_zero(fortuna_state.fs_counter));
}

static bool
random_fortuna_seeded(void)
{

#ifdef _KERNEL
        if (block_seeded_status)
                return (false);
#endif

        if (__predict_true(random_fortuna_seeded_internal()))
                return (true);

        /*
         * Maybe we have enough entropy in the zeroth pool but just haven't
         * kicked the initial seed step. Do so now.
         */
        random_fortuna_pre_read();

        return (random_fortuna_seeded_internal());
}