/*-
 * Copyright (c) 2017 W. Dean Freeman
 * Copyright (c) 2013-2015 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This implementation of Fortuna is based on the descriptions found in
 * ISBN 978-0-470-47424-2 "Cryptography Engineering" by Ferguson, Schneier
 * and Kohno ("FS&K").
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/limits.h>

#ifdef _KERNEL
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#else /* !_KERNEL */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>

#include "unit_test.h"
#endif /* _KERNEL */

#include <crypto/chacha20/chacha.h>
#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>

#include <dev/random/hash.h>
#include <dev/random/randomdev.h>
#ifdef _KERNEL
#include <dev/random/random_harvestq.h>
#endif
#include <dev/random/uint128.h>
#include <dev/random/fortuna.h>

/* Defined in FS&K */
#define RANDOM_FORTUNA_NPOOLS 32		/* The number of accumulation pools */
#define RANDOM_FORTUNA_DEFPOOLSIZE 64		/* The default pool size/length for a (re)seed */
#define RANDOM_FORTUNA_MAX_READ (1 << 20)	/* Max bytes from AES before rekeying */
#define RANDOM_FORTUNA_BLOCKS_PER_KEY (1 << 16)	/* Max blocks from AES before rekeying */
CTASSERT(RANDOM_FORTUNA_BLOCKS_PER_KEY * RANDOM_BLOCKSIZE ==
    RANDOM_FORTUNA_MAX_READ);

/*
 * The allowable range of RANDOM_FORTUNA_DEFPOOLSIZE. The default value is above.
 * Making RANDOM_FORTUNA_DEFPOOLSIZE too large will mean a long time between reseeds,
 * and too small may compromise initial security but get faster reseeds.
 */
#define RANDOM_FORTUNA_MINPOOLSIZE 16
#define RANDOM_FORTUNA_MAXPOOLSIZE INT_MAX
CTASSERT(RANDOM_FORTUNA_MINPOOLSIZE <= RANDOM_FORTUNA_DEFPOOLSIZE);
CTASSERT(RANDOM_FORTUNA_DEFPOOLSIZE <= RANDOM_FORTUNA_MAXPOOLSIZE);

/* This algorithm (and code) presumes that RANDOM_KEYSIZE is twice as large as RANDOM_BLOCKSIZE */
CTASSERT(RANDOM_BLOCKSIZE == sizeof(uint128_t));
CTASSERT(RANDOM_KEYSIZE == 2*RANDOM_BLOCKSIZE);

/* Probes for dtrace(1) */
#ifdef _KERNEL
SDT_PROVIDER_DECLARE(random);
SDT_PROVIDER_DEFINE(random);
SDT_PROBE_DEFINE2(random, fortuna, event_processor, debug, "u_int", "struct fs_pool *");
#endif /* _KERNEL */

/*
 * This is the beastie that needs protecting. It contains all of the
 * state that we are excited about. Exactly one is instantiated.
 */
static struct fortuna_state {
        struct fs_pool {		/* P_i */
                u_int fsp_length;	/* Only the first one is used by Fortuna */
                struct randomdev_hash fsp_hash;
        } fs_pool[RANDOM_FORTUNA_NPOOLS];
        u_int fs_reseedcount;		/* ReseedCnt */
        uint128_t fs_counter;		/* C */
        union randomdev_key fs_key;	/* K */
        u_int fs_minpoolsize;		/* Extras */
        /* Extras for the OS */
#ifdef _KERNEL
        /* For use when 'pacing' the reseeds */
        sbintime_t fs_lasttime;
#endif
        /* Reseed lock */
        mtx_t fs_mtx;
} fortuna_state;

/*
 * This knob enables or disables the "Concurrent Reads" Fortuna feature.
 *
 * The benefit of Concurrent Reads is improved concurrency in Fortuna. That is
 * reflected in two related aspects:
 *
 * 1. Concurrent full-rate devrandom readers can achieve similar throughput to
 *    a single reader thread (at least up to a modest number of cores; the
 *    non-concurrent design falls over at 2 readers).
 *
 * 2. The rand_harvestq process spends much less time spinning when one or more
 *    readers is processing a large request. Partially this is due to the
 *    rand_harvestq / ra_event_processor design, which only passes one event at
 *    a time to the underlying algorithm. Each time, Fortuna must take its
 *    global state mutex, potentially blocking on a reader. Our adaptive
 *    mutexes assume that a lock holder currently on CPU will release the lock
 *    quickly, and spin if the owning thread is currently running.
 *
 *    (There is no reason rand_harvestq necessarily has to use the same lock as
 *    the generator, or that it must necessarily drop and retake locks
 *    repeatedly, but that is the current status quo.)
 *
 * The concern is that the reduced lock scope might result in a less safe
 * random(4) design. However, the reduced-lock scope design is still
 * fundamentally Fortuna. This is discussed below.
 *
 * Fortuna Read() only needs mutual exclusion between readers to correctly
 * update the shared read-side state: C, the 128-bit counter; and K, the
 * current cipher/PRF key.
 *
 * In the Fortuna design, the global counter C should provide an independent
 * range of values per request.
 *
 * Under lock, we can save a copy of C on the stack, and increment the global C
 * by the number of blocks a Read request will require.
 *
 * Still under lock, we can save a copy of the key K on the stack, and then
 * perform the usual key erasure K' <- Keystream(C, K, ...).
 * This does require generating 256 bits (32 bytes) of cryptographic keystream
 * output with the global lock held, but that's all; none of the API keystream
 * generation must be performed under lock.
 *
 * At this point, we may unlock.
 *
 * Some example timelines below (to oversimplify, all requests are in units of
 * native blocks, the keysize happens to be equal to or less than the native
 * blocksize of the underlying cipher, and the same sequence of two requests
 * arrive in the same order). The possibly expensive consumer keystream
 * generation portion is marked with '**'.
 *
 * Status Quo fortuna_read()              Reduced-scope locking
 * -------------------------              ---------------------
 * C=C_0, K=K_0                           C=C_0, K=K_0
 * <Thr 1 requests N blocks>              <Thr 1 requests N blocks>
 * 1:Lock()                               1:Lock()
 * <Thr 2 requests M blocks>              <Thr 2 requests M blocks>
 * 1:GenBytes()                           1:stack_C := C_0
 * 1: Keystream(C_0, K_0, N)              1:stack_K := K_0
 * 1:  <N blocks generated>**             1:C' := C_0 + N
 * 1:  C' := C_0 + N                      1:K' := Keystream(C', K_0, 1)
 * 1: <- Keystream                        1: <1 block generated>
 * 1: K' := Keystream(C', K_0, 1)         1: C'' := C' + 1
 * 1:  <1 block generated>                1: <- Keystream
 * 1:  C'' := C' + 1                      1:Unlock()
 * 1: <- Keystream
 * 1: <- GenBytes()
 * 1:Unlock()
 *
 * Just prior to unlock, shared state is identical:
 * ------------------------------------------------
 * C'' == C_0 + N + 1                     C'' == C_0 + N + 1
 * K' == keystream generated from         K' == keystream generated from
 *    C_0 + N, K_0.                          C_0 + N, K_0.
 * K_0 has been erased.                   K_0 has been erased.
 *
 * After both designs unlock, the 2nd reader is unblocked.
 *
 * 2:Lock()                               2:Lock()
 * 2:GenBytes()                           2:stack_C' := C''
 * 2: Keystream(C'', K', M)               2:stack_K' := K'
 * 2:  <M blocks generated>**             2:C''' := C'' + M
 * 2:  C''' := C'' + M                    2:K'' := Keystream(C''', K', 1)
 * 2: <- Keystream                        2: <1 block generated>
 * 2: K'' := Keystream(C''', K', 1)       2: C'''' := C''' + 1
 * 2:  <1 block generated>                2: <- Keystream
 * 2:  C'''' := C''' + 1                  2:Unlock()
 * 2: <- Keystream
 * 2: <- GenBytes()
 * 2:Unlock()
 *
 * Just prior to unlock, global state is identical:
 * ------------------------------------------------
 *
 * C'''' == (C_0 + N + 1) + M + 1         C'''' == (C_0 + N + 1) + M + 1
 * K'' == keystream generated from        K'' == keystream generated from
 *    C_0 + N + 1 + M, K'.                   C_0 + N + 1 + M, K'.
 * K' has been erased.                    K' has been erased.
 *
 * Finally, in the new design, the two consumer threads can finish the
 * remainder of the generation at any time (including simultaneously):
 *
 * 1: GenBytes()
 * 1:  Keystream(stack_C, stack_K, N)
 * 1:   <N blocks generated>**
 * 1:  <- Keystream
 * 1: <- GenBytes
 * 1:ExplicitBzero(stack_C, stack_K)
 *
 * 2: GenBytes()
 * 2:  Keystream(stack_C', stack_K', M)
 * 2:   <M blocks generated>**
 * 2:  <- Keystream
 * 2: <- GenBytes
 * 2:ExplicitBzero(stack_C', stack_K')
 *
 * The generated user keystream for both threads is identical between the two
 * implementations:
 *
 * 1: Keystream(C_0, K_0, N)              1: Keystream(stack_C, stack_K, N)
 * 2: Keystream(C'', K', M)               2: Keystream(stack_C', stack_K', M)
 *
 * (stack_C == C_0; stack_K == K_0; stack_C' == C''; stack_K' == K'.)
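 *
 * Condensing the above into one illustrative sketch (pseudo-code only; the
 * concrete routines below are random_fortuna_read_concurrent() and
 * random_fortuna_genbytes()):
 *
 *     Lock();
 *     stack_C := C; stack_K := K;            (per-consumer copies of PRF inputs)
 *     C := C + blocks(request);              (reserve an independent counter range)
 *     K := Keystream(C, K, 1 key);           (FS&K key erasure, still under lock)
 *     Unlock();
 *     Keystream(stack_C, stack_K, request);  (bulk generation, no lock held)
 *     ExplicitBzero(stack_C, stack_K);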
 */
static bool fortuna_concurrent_read __read_frequently = true;

#ifdef _KERNEL
static struct sysctl_ctx_list random_clist;
RANDOM_CHECK_UINT(fs_minpoolsize, RANDOM_FORTUNA_MINPOOLSIZE, RANDOM_FORTUNA_MAXPOOLSIZE);
#else
static uint8_t zero_region[RANDOM_ZERO_BLOCKSIZE];
#endif

static void random_fortuna_pre_read(void);
static void random_fortuna_read(uint8_t *, size_t);
static bool random_fortuna_seeded(void);
static bool random_fortuna_seeded_internal(void);
static void random_fortuna_process_event(struct harvest_event *);

static void random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount);

#ifdef RANDOM_LOADABLE
static
#endif
const struct random_algorithm random_alg_context = {
        .ra_ident = "Fortuna",
        .ra_pre_read = random_fortuna_pre_read,
        .ra_read = random_fortuna_read,
        .ra_seeded = random_fortuna_seeded,
        .ra_event_processor = random_fortuna_process_event,
        .ra_poolcount = RANDOM_FORTUNA_NPOOLS,
};

/* ARGSUSED */
static void
random_fortuna_init_alg(void *unused __unused)
{
        int i;
#ifdef _KERNEL
        struct sysctl_oid *random_fortuna_o;
#endif

#ifdef RANDOM_LOADABLE
        p_random_alg_context = &random_alg_context;
#endif

        RANDOM_RESEED_INIT_LOCK();
        /*
         * Fortuna parameters. Do not adjust these unless you have
         * a very good clue about what they do!
         */
        fortuna_state.fs_minpoolsize = RANDOM_FORTUNA_DEFPOOLSIZE;
#ifdef _KERNEL
        fortuna_state.fs_lasttime = 0;
        random_fortuna_o = SYSCTL_ADD_NODE(&random_clist,
            SYSCTL_STATIC_CHILDREN(_kern_random),
            OID_AUTO, "fortuna", CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
            "Fortuna Parameters");
        SYSCTL_ADD_PROC(&random_clist,
            SYSCTL_CHILDREN(random_fortuna_o), OID_AUTO, "minpoolsize",
            CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
            &fortuna_state.fs_minpoolsize, RANDOM_FORTUNA_DEFPOOLSIZE,
            random_check_uint_fs_minpoolsize, "IU",
            "Minimum pool size necessary to cause a reseed");
        KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0 at startup"));

        SYSCTL_ADD_BOOL(&random_clist, SYSCTL_CHILDREN(random_fortuna_o),
            OID_AUTO, "concurrent_read", CTLFLAG_RDTUN,
            &fortuna_concurrent_read, 0, "If non-zero, enable "
            "feature to improve concurrent Fortuna performance.");
#endif

        /*-
         * FS&K - InitializePRNG()
         *      - P_i = \epsilon
         *      - ReseedCNT = 0
         */
        for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
                randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
                fortuna_state.fs_pool[i].fsp_length = 0;
        }
        fortuna_state.fs_reseedcount = 0;
        /*-
         * FS&K - InitializeGenerator()
         *      - C = 0
         *      - K = 0
         */
        fortuna_state.fs_counter = UINT128_ZERO;
        explicit_bzero(&fortuna_state.fs_key, sizeof(fortuna_state.fs_key));
}
SYSINIT(random_alg, SI_SUB_RANDOM, SI_ORDER_SECOND, random_fortuna_init_alg,
    NULL);

/*-
 * FS&K - AddRandomEvent()
 * Process a single stochastic event off the harvest queue
 */
static void
random_fortuna_process_event(struct harvest_event *event)
{
        u_int pl;

        RANDOM_RESEED_LOCK();
        /*-
         * FS&K - P_i = P_i|<harvested stuff>
         * Accumulate the event into the appropriate pool
         * where each event carries the destination information.
         *
         * The hash_init() and hash_finish() calls are done in
         * random_fortuna_pre_read().
         *
         * We must be locked against pool state modification which can happen
         * during accumulation/reseeding and reading/regating.
         */
        pl = event->he_destination % RANDOM_FORTUNA_NPOOLS;
        /*
         * If a VM generation ID changes (clone and play or VM rewind), we want
         * to incorporate that as soon as possible. Override destination pool
         * for immediate next use.
         */
        if (event->he_source == RANDOM_PURE_VMGENID)
                pl = 0;
        /*
         * We ignore low entropy static/counter fields towards the end of the
         * he_event structure in order to increase measurable entropy when
         * conducting SP800-90B entropy analysis measurements of seed material
         * fed into the PRNG.
         * -- wdf
         */
        KASSERT(event->he_size <= sizeof(event->he_entropy),
            ("%s: event->he_size: %hhu > sizeof(event->he_entropy): %zu\n",
            __func__, event->he_size, sizeof(event->he_entropy)));
        randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
            &event->he_somecounter, sizeof(event->he_somecounter));
        randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
            event->he_entropy, event->he_size);

        /*-
         * Don't wrap the length. This is a "saturating" add.
         * XXX: FIX!!: We don't actually need lengths for anything but fs_pool[0],
         * but it's been useful debugging to see them all.
         */
        fortuna_state.fs_pool[pl].fsp_length = MIN(RANDOM_FORTUNA_MAXPOOLSIZE,
            fortuna_state.fs_pool[pl].fsp_length +
            sizeof(event->he_somecounter) + event->he_size);
        RANDOM_RESEED_UNLOCK();
}

/*-
 * FS&K - Reseed()
 * This introduces new key material into the output generator.
 * Additionally it increments the output generator's counter
 * variable C. When C > 0, the output generator is seeded and
 * will deliver output.
 * The entropy_data buffer passed is a very specific size; the
 * product of RANDOM_FORTUNA_NPOOLS and RANDOM_KEYSIZE.
 */
static void
random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount)
{
        struct randomdev_hash context;
        uint8_t hash[RANDOM_KEYSIZE];
        const void *keymaterial;
        size_t keysz;
        bool seeded;

        RANDOM_RESEED_ASSERT_LOCK_OWNED();

        seeded = random_fortuna_seeded_internal();
        if (seeded) {
                randomdev_getkey(&fortuna_state.fs_key, &keymaterial, &keysz);
                KASSERT(keysz == RANDOM_KEYSIZE, ("%s: key size %zu not %u",
                    __func__, keysz, (unsigned)RANDOM_KEYSIZE));
        }

        /*-
         * FS&K - K = Hd(K|s) where Hd(m) is H(H(0^512|m))
         *      - C = C + 1
         */
        randomdev_hash_init(&context);
        randomdev_hash_iterate(&context, zero_region, RANDOM_ZERO_BLOCKSIZE);
        if (seeded)
                randomdev_hash_iterate(&context, keymaterial, keysz);
        randomdev_hash_iterate(&context, entropy_data, RANDOM_KEYSIZE*blockcount);
        randomdev_hash_finish(&context, hash);
        randomdev_hash_init(&context);
        randomdev_hash_iterate(&context, hash, RANDOM_KEYSIZE);
        randomdev_hash_finish(&context, hash);
        randomdev_encrypt_init(&fortuna_state.fs_key, hash);
        explicit_bzero(hash, sizeof(hash));
        /* Unblock the device if this is the first time we are reseeding. */
        if (uint128_is_zero(fortuna_state.fs_counter))
                randomdev_unblock();
        uint128_increment(&fortuna_state.fs_counter);
}

/*-
 * FS&K - RandomData() (Part 1)
 * Used to return processed entropy from the PRNG. There is a pre_read
 * required to be present (but it can be a stub) in order to allow
 * specific actions at the beginning of the read.
 */
void
random_fortuna_pre_read(void)
{
#ifdef _KERNEL
        sbintime_t now;
#endif
        struct randomdev_hash context;
        uint32_t s[RANDOM_FORTUNA_NPOOLS*RANDOM_KEYSIZE_WORDS];
        uint8_t temp[RANDOM_KEYSIZE];
        u_int i;

        KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0"));
        RANDOM_RESEED_LOCK();
#ifdef _KERNEL
        /* FS&K - Use 'getsbinuptime()' to prevent reseed-spamming. */
        now = getsbinuptime();
#endif

        if (fortuna_state.fs_pool[0].fsp_length < fortuna_state.fs_minpoolsize
#ifdef _KERNEL
            /*
             * FS&K - Use 'getsbinuptime()' to prevent reseed-spamming, but do
             * not block initial seeding (fs_lasttime == 0).
             */
            || (__predict_true(fortuna_state.fs_lasttime != 0) &&
                now - fortuna_state.fs_lasttime <= SBT_1S/10)
#endif
        ) {
                RANDOM_RESEED_UNLOCK();
                return;
        }

#ifdef _KERNEL
        /*
         * When set, pretend we do not have enough entropy to reseed yet.
         */
        KFAIL_POINT_CODE(DEBUG_FP, random_fortuna_pre_read, {
                if (RETURN_VALUE != 0) {
                        RANDOM_RESEED_UNLOCK();
                        return;
                }
        });
#endif

#ifdef _KERNEL
        fortuna_state.fs_lasttime = now;
#endif

        /* FS&K - ReseedCNT = ReseedCNT + 1 */
        fortuna_state.fs_reseedcount++;
        /* s = \epsilon at start */
        for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
                /* FS&K - if Divides(ReseedCnt, 2^i) ... */
                if ((fortuna_state.fs_reseedcount % (1 << i)) == 0) {
                        /*-
                         * FS&K - temp = (P_i)
                         *      - P_i = \epsilon
                         *      - s = s|H(temp)
                         */
                        randomdev_hash_finish(&fortuna_state.fs_pool[i].fsp_hash, temp);
                        randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
                        fortuna_state.fs_pool[i].fsp_length = 0;
                        randomdev_hash_init(&context);
                        randomdev_hash_iterate(&context, temp, RANDOM_KEYSIZE);
                        randomdev_hash_finish(&context, s + i*RANDOM_KEYSIZE_WORDS);
                } else
                        break;
        }
#ifdef _KERNEL
        SDT_PROBE2(random, fortuna, event_processor, debug, fortuna_state.fs_reseedcount, fortuna_state.fs_pool);
#endif
        /* FS&K */
        random_fortuna_reseed_internal(s, i);
        RANDOM_RESEED_UNLOCK();

        /* Clean up and secure */
        explicit_bzero(s, sizeof(s));
        explicit_bzero(temp, sizeof(temp));
}

/*
 * This is basically GenerateBlocks() from FS&K.
 *
 * It differs in three ways:
 *
 * 1. Chacha20 is tolerant of non-block-multiple request sizes, so we do not
 * need to handle any remainder bytes specially and can just pass the length
 * directly to the PRF construction;
 *
 * 2. Chacha20 is a 512-bit block size cipher (whereas AES has 128-bit block
 * size, regardless of key size). This means Chacha does not require re-keying
 * every 1MiB. This is implied by the math in FS&K 9.4 and mentioned
 * explicitly in the conclusion, "If we had a block cipher with a 256-bit [or
 * greater] block size, then the collisions would not have been an issue at
 * all" (p. 144); and
 *
 * 3. In conventional ("locked") mode, we produce a maximum of PAGE_SIZE output
 * at a time before dropping the lock, to avoid bullying the lock excessively.
 * This has been the status quo since 2015 (r284959).
 *
 * The upstream caller random_fortuna_read is responsible for zeroing out
 * sensitive buffers provided as parameters to this routine.
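 *
 * (For reference, the 1MiB AES rekey interval follows from the constants at
 * the top of this file: RANDOM_FORTUNA_BLOCKS_PER_KEY (2^16 blocks) times
 * RANDOM_BLOCKSIZE (16-byte AES blocks) equals RANDOM_FORTUNA_MAX_READ (2^20
 * bytes), as the CTASSERT there verifies.)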
 */
enum {
        FORTUNA_UNLOCKED = false,
        FORTUNA_LOCKED = true
};
static void
random_fortuna_genbytes(uint8_t *buf, size_t bytecount,
    uint8_t newkey[static RANDOM_KEYSIZE], uint128_t *p_counter,
    union randomdev_key *p_key, bool locked)
{
        uint8_t remainder_buf[RANDOM_BLOCKSIZE];
        size_t chunk_size;

        if (locked)
                RANDOM_RESEED_ASSERT_LOCK_OWNED();
        else
                RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();

        /*
         * Easy case: don't have to worry about bullying the global mutex,
         * don't have to worry about rekeying Chacha; API is byte-oriented.
         */
        if (!locked && random_chachamode) {
                randomdev_keystream(p_key, p_counter, buf, bytecount);
                return;
        }

        if (locked) {
                /*
                 * While holding the global lock, limit PRF generation to
                 * mitigate, but not eliminate, bullying symptoms.
                 */
                chunk_size = PAGE_SIZE;
        } else {
                /*
                 * 128-bit block ciphers like AES must be re-keyed at 1MB
                 * intervals to avoid unacceptable statistical differentiation
                 * from true random data (FS&K 9.4, p. 143-144).
                 */
                MPASS(!random_chachamode);
                chunk_size = RANDOM_FORTUNA_MAX_READ;
        }

        chunk_size = MIN(bytecount, chunk_size);
        if (!random_chachamode)
                chunk_size = rounddown(chunk_size, RANDOM_BLOCKSIZE);

        while (bytecount >= chunk_size && chunk_size > 0) {
                randomdev_keystream(p_key, p_counter, buf, chunk_size);

                buf += chunk_size;
                bytecount -= chunk_size;

                /*
                 * We have to rekey if there is any data remaining to be
                 * generated, in two scenarios:
                 *
                 * locked: we need to rekey before we unlock and release the
                 * global state to another consumer; or
                 *
                 * unlocked: we need to rekey because we're in AES mode and are
                 * required to rekey at chunk_size==1MB. But we do not need to
                 * rekey during the last trailing <1MB chunk.
                 */
                if (bytecount > 0) {
                        if (locked || chunk_size == RANDOM_FORTUNA_MAX_READ) {
                                randomdev_keystream(p_key, p_counter, newkey,
                                    RANDOM_KEYSIZE);
                                randomdev_encrypt_init(p_key, newkey);
                        }

                        /*
                         * If we're holding the global lock, yield it briefly
                         * now.
                         */
                        if (locked) {
                                RANDOM_RESEED_UNLOCK();
                                RANDOM_RESEED_LOCK();
                        }

                        /*
                         * At the trailing end, scale down chunk_size from 1MB
                         * or PAGE_SIZE to all remaining full blocks (AES) or
                         * all remaining bytes (Chacha).
                         */
                        if (bytecount < chunk_size) {
                                if (random_chachamode)
                                        chunk_size = bytecount;
                                else if (bytecount >= RANDOM_BLOCKSIZE)
                                        chunk_size = rounddown(bytecount,
                                            RANDOM_BLOCKSIZE);
                                else
                                        break;
                        }
                }
        }

        /*
         * Generate any partial AES block remaining into a temporary buffer and
         * copy the desired substring out.
         */
        if (bytecount > 0) {
                MPASS(!random_chachamode);

                randomdev_keystream(p_key, p_counter, remainder_buf,
                    sizeof(remainder_buf));
        }

        /*
         * In locked mode, re-key global K before dropping the lock, which we
         * don't need for memcpy/bzero below.
         */
        if (locked) {
                randomdev_keystream(p_key, p_counter, newkey, RANDOM_KEYSIZE);
                randomdev_encrypt_init(p_key, newkey);
                RANDOM_RESEED_UNLOCK();
        }

        if (bytecount > 0) {
                memcpy(buf, remainder_buf, bytecount);
                explicit_bzero(remainder_buf, sizeof(remainder_buf));
        }
}

/*
 * Handle only "concurrency-enabled" Fortuna reads to simplify logic.
 *
 * Caller (random_fortuna_read) is responsible for zeroing out sensitive
 * buffers provided as parameters to this routine.
 */
static void
random_fortuna_read_concurrent(uint8_t *buf, size_t bytecount,
    uint8_t newkey[static RANDOM_KEYSIZE])
{
        union randomdev_key key_copy;
        uint128_t counter_copy;
        size_t blockcount;

        MPASS(fortuna_concurrent_read);

        /*
         * Compute number of blocks required for the PRF request ('delta C').
         * We will step the global counter 'C' by this number under lock, and
         * then actually consume the counter values outside the lock.
         *
         * This ensures that contemporaneous but independent requests for
         * randomness receive distinct 'C' values and thus independent PRF
         * results.
         */
        if (random_chachamode) {
                blockcount = howmany(bytecount, CHACHA_BLOCKLEN);
        } else {
                blockcount = howmany(bytecount, RANDOM_BLOCKSIZE);

                /*
                 * Need to account for the additional blocks generated by
                 * rekeying when updating the global fs_counter.
                 */
                blockcount += RANDOM_KEYS_PER_BLOCK *
                    (blockcount / RANDOM_FORTUNA_BLOCKS_PER_KEY);
        }

        RANDOM_RESEED_LOCK();
        KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));

        /*
         * Save the original counter and key values that will be used as the
         * PRF for this particular consumer.
         */
        memcpy(&counter_copy, &fortuna_state.fs_counter, sizeof(counter_copy));
        memcpy(&key_copy, &fortuna_state.fs_key, sizeof(key_copy));

        /*
         * Step the counter as if we had generated 'blockcount' blocks for this
         * consumer. I.e., ensure that the next consumer gets an independent
         * range of counter values once we drop the global lock.
         */
        uint128_add64(&fortuna_state.fs_counter, blockcount);

        /*
         * We still need to Rekey the global 'K' between independent calls;
         * this is no different from conventional Fortuna. Note that
         * 'randomdev_keystream()' will step the fs_counter 'C' appropriately
         * for the blocks needed for the 'newkey'.
         *
         * (This is part of PseudoRandomData() in FS&K, 9.4.4.)
         */
        randomdev_keystream(&fortuna_state.fs_key, &fortuna_state.fs_counter,
            newkey, RANDOM_KEYSIZE);
        randomdev_encrypt_init(&fortuna_state.fs_key, newkey);

        /*
         * We have everything we need to generate a unique PRF for this
         * consumer without touching global state.
         */
        RANDOM_RESEED_UNLOCK();

        random_fortuna_genbytes(buf, bytecount, newkey, &counter_copy,
            &key_copy, FORTUNA_UNLOCKED);
        RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();

        explicit_bzero(&counter_copy, sizeof(counter_copy));
        explicit_bzero(&key_copy, sizeof(key_copy));
}

/*-
 * FS&K - RandomData() (Part 2)
 * Main read from Fortuna, continued. May be called multiple times after
 * the random_fortuna_pre_read() above.
 *
 * The supplied buf MAY not be a multiple of RANDOM_BLOCKSIZE in size; it is
 * the responsibility of the algorithm to accommodate partial block reads, if a
 * block output mode is used.
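 *
 * Note that the 'newkey' buffer below is only scratch space for rekey output
 * (in both the concurrent and locked paths); it is explicitly zeroed before
 * this function returns.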
 */
void
random_fortuna_read(uint8_t *buf, size_t bytecount)
{
        uint8_t newkey[RANDOM_KEYSIZE];

        if (fortuna_concurrent_read) {
                random_fortuna_read_concurrent(buf, bytecount, newkey);
                goto out;
        }

        RANDOM_RESEED_LOCK();
        KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));

        random_fortuna_genbytes(buf, bytecount, newkey,
            &fortuna_state.fs_counter, &fortuna_state.fs_key, FORTUNA_LOCKED);
        /* Returns unlocked */
        RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();

out:
        explicit_bzero(newkey, sizeof(newkey));
}

#ifdef _KERNEL
static bool block_seeded_status = false;
SYSCTL_BOOL(_kern_random, OID_AUTO, block_seeded_status, CTLFLAG_RWTUN,
    &block_seeded_status, 0,
    "If non-zero, pretend Fortuna is in an unseeded state. By setting "
    "this as a tunable, boot can be tested as if the random device is "
    "unavailable.");
#endif

static bool
random_fortuna_seeded_internal(void)
{
        return (!uint128_is_zero(fortuna_state.fs_counter));
}

static bool
random_fortuna_seeded(void)
{

#ifdef _KERNEL
        if (block_seeded_status)
                return (false);
#endif

        if (__predict_true(random_fortuna_seeded_internal()))
                return (true);

        /*
         * Maybe we have enough entropy in the zeroth pool but just haven't
         * kicked the initial seed step. Do so now.
         */
        random_fortuna_pre_read();

        return (random_fortuna_seeded_internal());
}