xref: /freebsd/sys/dev/random/fortuna.c (revision 96190b4fef3b4a0cc3ca0606b0c4e3e69a5e6717)
1 /*-
2  * Copyright (c) 2017 W. Dean Freeman
3  * Copyright (c) 2013-2015 Mark R V Murray
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer
11  *    in this position and unchanged.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  */
28 
29 /*
30  * This implementation of Fortuna is based on the descriptions found in
31  * ISBN 978-0-470-47424-2 "Cryptography Engineering" by Ferguson, Schneier
32  * and Kohno ("FS&K").
33  */
34 
35 #include <sys/param.h>
36 #include <sys/limits.h>
37 
38 #ifdef _KERNEL
39 #include <sys/fail.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mutex.h>
44 #include <sys/random.h>
45 #include <sys/sdt.h>
46 #include <sys/sysctl.h>
47 #include <sys/systm.h>
48 
49 #include <machine/cpu.h>
50 #else /* !_KERNEL */
51 #include <inttypes.h>
52 #include <stdbool.h>
53 #include <stdio.h>
54 #include <stdlib.h>
55 #include <string.h>
56 #include <threads.h>
57 
58 #include "unit_test.h"
59 #endif /* _KERNEL */
60 
61 #include <crypto/chacha20/chacha.h>
62 #include <crypto/rijndael/rijndael-api-fst.h>
63 #include <crypto/sha2/sha256.h>
64 
65 #include <dev/random/hash.h>
66 #include <dev/random/randomdev.h>
67 #ifdef _KERNEL
68 #include <dev/random/random_harvestq.h>
69 #endif
70 #include <dev/random/uint128.h>
71 #include <dev/random/fortuna.h>
72 
73 /* Defined in FS&K */
74 #define	RANDOM_FORTUNA_MAX_READ (1 << 20)	/* Max bytes from AES before rekeying */
75 #define	RANDOM_FORTUNA_BLOCKS_PER_KEY (1 << 16)	/* Max blocks from AES before rekeying */
76 CTASSERT(RANDOM_FORTUNA_BLOCKS_PER_KEY * RANDOM_BLOCKSIZE ==
77     RANDOM_FORTUNA_MAX_READ);
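
/*
 * For reference: with the 128-bit (16-byte) AES block size asserted further
 * below, (1 << 16) blocks * 16 bytes per block = (1 << 20) bytes, which is
 * exactly the 1MiB rekeying interval checked by the CTASSERT above.
 */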
78 
79 /*
80  * The allowable range of RANDOM_FORTUNA_DEFPOOLSIZE. The default value is above.
81  * Making RANDOM_FORTUNA_DEFPOOLSIZE too large will mean a long time between reseeds,
82  * and making it too small may compromise initial security in exchange for faster reseeds.
83  */
84 #define	RANDOM_FORTUNA_MINPOOLSIZE 16
85 #define	RANDOM_FORTUNA_MAXPOOLSIZE INT_MAX
86 CTASSERT(RANDOM_FORTUNA_MINPOOLSIZE <= RANDOM_FORTUNA_DEFPOOLSIZE);
87 CTASSERT(RANDOM_FORTUNA_DEFPOOLSIZE <= RANDOM_FORTUNA_MAXPOOLSIZE);
88 
89 /* This algorithm (and code) presumes that RANDOM_KEYSIZE is twice as large as RANDOM_BLOCKSIZE */
90 CTASSERT(RANDOM_BLOCKSIZE == sizeof(uint128_t));
91 CTASSERT(RANDOM_KEYSIZE == 2*RANDOM_BLOCKSIZE);
92 
93 /* Probes for dtrace(1) */
94 #ifdef _KERNEL
95 SDT_PROVIDER_DECLARE(random);
96 SDT_PROVIDER_DEFINE(random);
97 SDT_PROBE_DEFINE2(random, fortuna, event_processor, debug, "u_int", "struct fs_pool *");
98 #endif /* _KERNEL */
99 
100 /*
101  * This is the beastie that needs protecting. It contains all of the
102  * state that we are excited about. Exactly one is instantiated.
103  */
104 static struct fortuna_state {
105 	struct fs_pool {		/* P_i */
106 		u_int fsp_length;	/* Only the first one is used by Fortuna */
107 		struct randomdev_hash fsp_hash;
108 	} fs_pool[RANDOM_FORTUNA_NPOOLS];
109 	u_int fs_reseedcount;		/* ReseedCnt */
110 	uint128_t fs_counter;		/* C */
111 	union randomdev_key fs_key;	/* K */
112 	u_int fs_minpoolsize;		/* Extras */
113 	/* Extras for the OS */
114 #ifdef _KERNEL
115 	/* For use when 'pacing' the reseeds */
116 	sbintime_t fs_lasttime;
117 #endif
118 	/* Reseed lock */
119 	mtx_t fs_mtx;
120 } fortuna_state;
121 
122 /*
123  * This knob enables or disables the "Concurrent Reads" Fortuna feature.
124  *
125  * The benefit of Concurrent Reads is improved concurrency in Fortuna.  That is
126  * reflected in two related aspects:
127  *
128  * 1. Concurrent full-rate devrandom readers can achieve similar throughput to
129  *    a single reader thread (at least up to a modest number of cores; the
130  *    non-concurrent design falls over at 2 readers).
131  *
132  * 2. The rand_harvestq process spends much less time spinning when one or more
133  *    readers are processing a large request.  Partly this is due to the
134  *    rand_harvestq / ra_event_processor design, which only passes one event at
135  *    a time to the underlying algorithm.  Each time, Fortuna must take its
136  *    global state mutex, potentially blocking on a reader.  Our adaptive
137  *    mutexes assume that a lock holder currently on CPU will release the lock
138  *    quickly, and spin if the owning thread is currently running.
139  *
140  *    (There is no reason rand_harvestq necessarily has to use the same lock as
141  *    the generator, or that it must necessarily drop and retake locks
142  *    repeatedly, but that is the current status quo.)
143  *
144  * The concern is that the reduced lock scope might result in a less safe
145  * random(4) design.  However, the reduced-scope locking design is still
146  * fundamentally Fortuna.  This is discussed below.
147  *
148  * Fortuna Read() only needs mutual exclusion between readers to correctly
149  * update the shared read-side state: C, the 128-bit counter; and K, the
150  * current cipher/PRF key.
151  *
152  * In the Fortuna design, the global counter C should provide an independent
153  * range of values per request.
154  *
155  * Under lock, we can save a copy of C on the stack, and increment the global C
156  * by the number of blocks a Read request will require.
157  *
158  * Still under lock, we can save a copy of the key K on the stack, and then
159  * perform the usual key erasure K' <- Keystream(C, K, ...).  This does require
160  * generating 256 bits (32 bytes) of cryptographic keystream output with the
161  * global lock held, but that's all; none of the keystream generation for the
162  * caller's request needs to be performed under the lock.
163  *
164  * At this point, we may unlock.
165  *
166  * Some example timelines are shown below (to oversimplify, all requests are in
167  * units of native blocks, the keysize happens to be equal to or smaller than
168  * the native blocksize of the underlying cipher, and the same sequence of two
169  * requests arrives in the same order).  The possibly expensive consumer keystream
170  * generation portion is marked with '**'.
171  *
172  * Status Quo fortuna_read()           Reduced-scope locking
173  * -------------------------           ---------------------
174  * C=C_0, K=K_0                        C=C_0, K=K_0
175  * <Thr 1 requests N blocks>           <Thr 1 requests N blocks>
176  * 1:Lock()                            1:Lock()
177  * <Thr 2 requests M blocks>           <Thr 2 requests M blocks>
178  * 1:GenBytes()                        1:stack_C := C_0
179  * 1:  Keystream(C_0, K_0, N)          1:stack_K := K_0
180  * 1:    <N blocks generated>**        1:C' := C_0 + N
181  * 1:    C' := C_0 + N                 1:K' := Keystream(C', K_0, 1)
182  * 1:    <- Keystream                  1:  <1 block generated>
183  * 1:  K' := Keystream(C', K_0, 1)     1:  C'' := C' + 1
184  * 1:    <1 block generated>           1:  <- Keystream
185  * 1:    C'' := C' + 1                 1:Unlock()
186  * 1:    <- Keystream
187  * 1:  <- GenBytes()
188  * 1:Unlock()
189  *
190  * Just prior to unlock, shared state is identical:
191  * ------------------------------------------------
192  * C'' == C_0 + N + 1                  C'' == C_0 + N + 1
193  * K' == keystream generated from      K' == keystream generated from
194  *       C_0 + N, K_0.                       C_0 + N, K_0.
195  * K_0 has been erased.                K_0 has been erased.
196  *
197  * After both designs unlock, the 2nd reader is unblocked.
198  *
199  * 2:Lock()                            2:Lock()
200  * 2:GenBytes()                        2:stack_C' := C''
201  * 2:  Keystream(C'', K', M)           2:stack_K' := K'
202  * 2:    <M blocks generated>**        2:C''' := C'' + M
203  * 2:    C''' := C'' + M               2:K'' := Keystream(C''', K', 1)
204  * 2:    <- Keystream                  2:  <1 block generated>
205  * 2:  K'' := Keystream(C''', K', 1)   2:  C'''' := C''' + 1
206  * 2:    <1 block generated>           2:  <- Keystream
207  * 2:    C'''' := C''' + 1             2:Unlock()
208  * 2:    <- Keystream
209  * 2:  <- GenBytes()
210  * 2:Unlock()
211  *
212  * Just prior to unlock, global state is identical:
213  * ------------------------------------------------------
214  *
215  * C'''' == (C_0 + N + 1) + M + 1      C'''' == (C_0 + N + 1) + M + 1
216  * K'' == keystream generated from     K'' == keystream generated from
217  *        C_0 + N + 1 + M, K'.                C_0 + N + 1 + M, K'.
218  * K' has been erased.                 K' has been erased.
219  *
220  * Finally, in the new design, the two consumer threads can finish the
221  * remainder of the generation at any time (including simultaneously):
222  *
223  *                                     1:  GenBytes()
224  *                                     1:    Keystream(stack_C, stack_K, N)
225  *                                     1:      <N blocks generated>**
226  *                                     1:    <- Keystream
227  *                                     1:  <- GenBytes
228  *                                     1:ExplicitBzero(stack_C, stack_K)
229  *
230  *                                     2:  GenBytes()
231  *                                     2:    Keystream(stack_C', stack_K', M)
232  *                                     2:      <M blocks generated>**
233  *                                     2:    <- Keystream
234  *                                     2:  <- GenBytes
235  *                                     2:ExplicitBzero(stack_C', stack_K')
236  *
237  * The generated user keystream for both threads is identical between the two
238  * implementations:
239  *
240  * 1: Keystream(C_0, K_0, N)           1: Keystream(stack_C, stack_K, N)
241  * 2: Keystream(C'', K', M)            2: Keystream(stack_C', stack_K', M)
242  *
243  * (stack_C == C_0; stack_K == K_0; stack_C' == C''; stack_K' == K'.)
244  */
245 static bool fortuna_concurrent_read __read_frequently = true;
246 
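/*
 * Illustrative sketch only (never compiled): the reduced-scope locking
 * sequence from the timelines above, expressed with this file's primitives.
 * The helper name and its 'N' parameter (number of blocks requested) are
 * hypothetical; the real implementation is random_fortuna_read_concurrent()
 * below.
 */
#if 0
static void
fortuna_concurrent_read_sketch(uint8_t *buf, uint64_t N)
{
	uint128_t stack_C;
	union randomdev_key stack_K;
	uint8_t newkey[RANDOM_KEYSIZE];

	RANDOM_RESEED_LOCK();
	/* stack_C := C_0, stack_K := K_0 */
	memcpy(&stack_C, &fortuna_state.fs_counter, sizeof(stack_C));
	memcpy(&stack_K, &fortuna_state.fs_key, sizeof(stack_K));
	/* C' := C_0 + N */
	uint128_add64(&fortuna_state.fs_counter, N);
	/* K' := Keystream(C', K_0, ...); this also steps the global C. */
	randomdev_keystream(&fortuna_state.fs_key, &fortuna_state.fs_counter,
	    newkey, RANDOM_KEYSIZE);
	randomdev_encrypt_init(&fortuna_state.fs_key, newkey);
	RANDOM_RESEED_UNLOCK();

	/* The expensive consumer keystream generation runs unlocked. */
	randomdev_keystream(&stack_K, &stack_C, buf, N * RANDOM_BLOCKSIZE);

	explicit_bzero(&stack_C, sizeof(stack_C));
	explicit_bzero(&stack_K, sizeof(stack_K));
	explicit_bzero(newkey, sizeof(newkey));
}
#endif
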
247 #ifdef _KERNEL
248 static struct sysctl_ctx_list random_clist;
249 RANDOM_CHECK_UINT(fs_minpoolsize, RANDOM_FORTUNA_MINPOOLSIZE, RANDOM_FORTUNA_MAXPOOLSIZE);
250 #else
251 static uint8_t zero_region[RANDOM_ZERO_BLOCKSIZE];
252 #endif
253 
254 static void random_fortuna_pre_read(void);
255 static void random_fortuna_read(uint8_t *, size_t);
256 static bool random_fortuna_seeded(void);
257 static bool random_fortuna_seeded_internal(void);
258 static void random_fortuna_process_event(struct harvest_event *);
259 
260 static void random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount);
261 
262 #ifdef RANDOM_LOADABLE
263 static
264 #endif
265 const struct random_algorithm random_alg_context = {
266 	.ra_ident = "Fortuna",
267 	.ra_pre_read = random_fortuna_pre_read,
268 	.ra_read = random_fortuna_read,
269 	.ra_seeded = random_fortuna_seeded,
270 	.ra_event_processor = random_fortuna_process_event,
271 	.ra_poolcount = RANDOM_FORTUNA_NPOOLS,
272 };
273 
274 /* ARGSUSED */
275 static void
276 random_fortuna_init_alg(void *unused __unused)
277 {
278 	int i;
279 #ifdef _KERNEL
280 	struct sysctl_oid *random_fortuna_o;
281 #endif
282 
283 #ifdef RANDOM_LOADABLE
284 	p_random_alg_context = &random_alg_context;
285 #endif
286 
287 	RANDOM_RESEED_INIT_LOCK();
288 	/*
289 	 * Fortuna parameters. Do not adjust these unless you have
290 	 * a very good clue about what they do!
291 	 */
292 	fortuna_state.fs_minpoolsize = RANDOM_FORTUNA_DEFPOOLSIZE;
293 #ifdef _KERNEL
294 	fortuna_state.fs_lasttime = 0;
295 	random_fortuna_o = SYSCTL_ADD_NODE(&random_clist,
296 		SYSCTL_STATIC_CHILDREN(_kern_random),
297 		OID_AUTO, "fortuna", CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
298 		"Fortuna Parameters");
299 	SYSCTL_ADD_PROC(&random_clist,
300 	    SYSCTL_CHILDREN(random_fortuna_o), OID_AUTO, "minpoolsize",
301 	    CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
302 	    &fortuna_state.fs_minpoolsize, RANDOM_FORTUNA_DEFPOOLSIZE,
303 	    random_check_uint_fs_minpoolsize, "IU",
304 	    "Minimum pool size necessary to cause a reseed");
305 	KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0 at startup"));
306 
307 	SYSCTL_ADD_BOOL(&random_clist, SYSCTL_CHILDREN(random_fortuna_o),
308 	    OID_AUTO, "concurrent_read", CTLFLAG_RDTUN,
309 	    &fortuna_concurrent_read, 0, "If non-zero, enable "
310 	    "feature to improve concurrent Fortuna performance.");
311 #endif
312 
313 	/*-
314 	 * FS&K - InitializePRNG()
315 	 *      - P_i = \epsilon
316 	 *      - ReseedCNT = 0
317 	 */
318 	for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
319 		randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
320 		fortuna_state.fs_pool[i].fsp_length = 0;
321 	}
322 	fortuna_state.fs_reseedcount = 0;
323 	/*-
324 	 * FS&K - InitializeGenerator()
325 	 *      - C = 0
326 	 *      - K = 0
327 	 */
328 	fortuna_state.fs_counter = UINT128_ZERO;
329 	explicit_bzero(&fortuna_state.fs_key, sizeof(fortuna_state.fs_key));
330 }
331 SYSINIT(random_alg, SI_SUB_RANDOM, SI_ORDER_SECOND, random_fortuna_init_alg,
332     NULL);
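
/*
 * The knobs registered in random_fortuna_init_alg() can be set via sysctl(8)
 * or as loader tunables, for example (hypothetical values):
 *
 *   kern.random.fortuna.minpoolsize=128      # RWTUN: sysctl or loader.conf
 *   kern.random.fortuna.concurrent_read=0    # RDTUN: loader.conf only
 */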
333 
334 /*-
335  * FS&K - AddRandomEvent()
336  * Process a single stochastic event off the harvest queue
337  */
338 static void
339 random_fortuna_process_event(struct harvest_event *event)
340 {
341 	u_int pl;
342 
343 	RANDOM_RESEED_LOCK();
344 	/*-
345 	 * FS&K - P_i = P_i|<harvested stuff>
346 	 * Accumulate the event into the appropriate pool
347 	 * where each event carries the destination information.
348 	 *
349 	 * The hash_init() and hash_finish() calls are done in
350 	 * random_fortuna_pre_read().
351 	 *
352 	 * We must be locked against pool state modification, which can happen
353 	 * during accumulation/reseeding and reading/re-keying.
354 	 */
355 	pl = event->he_destination % RANDOM_FORTUNA_NPOOLS;
356 	/*
357 	 * If a VM generation ID changes (clone and play or VM rewind), we want
358 	 * to incorporate that as soon as possible.  Override the destination pool
359 	 * for immediate next use.
360 	 */
361 	if (event->he_source == RANDOM_PURE_VMGENID)
362 		pl = 0;
363 	/*
364 	 * We ignore low-entropy static/counter fields towards the end of the
365 	 * harvest_event structure in order to increase measurable entropy when
366 	 * conducting SP800-90B entropy analysis measurements of the seed material
367 	 * fed into the PRNG.
368 	 * -- wdf
369 	 */
370 	KASSERT(event->he_size <= sizeof(event->he_entropy),
371 	    ("%s: event->he_size: %hhu > sizeof(event->he_entropy): %zu\n",
372 	    __func__, event->he_size, sizeof(event->he_entropy)));
373 	randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
374 	    &event->he_somecounter, sizeof(event->he_somecounter));
375 	randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
376 	    event->he_entropy, event->he_size);
377 
378 	/*-
379 	 * Don't wrap the length.  This is a "saturating" add.
380 	 * XXX: FIX!!: We don't actually need lengths for anything but fs_pool[0],
381 	 * but it's been useful for debugging to see them all.
382 	 */
383 	fortuna_state.fs_pool[pl].fsp_length = MIN(RANDOM_FORTUNA_MAXPOOLSIZE,
384 	    fortuna_state.fs_pool[pl].fsp_length +
385 	    sizeof(event->he_somecounter) + event->he_size);
386 	RANDOM_RESEED_UNLOCK();
387 }
388 
389 /*-
390  * FS&K - Reseed()
391  * This introduces new key material into the output generator.
392  * Additionally it increments the output generator's counter
393  * variable C. When C > 0, the output generator is seeded and
394  * will deliver output.
395  * The entropy_data buffer passed is a very specific size; the
396  * product of RANDOM_FORTUNA_NPOOLS and RANDOM_KEYSIZE.
397  */
398 static void
399 random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount)
400 {
401 	struct randomdev_hash context;
402 	uint8_t hash[RANDOM_KEYSIZE];
403 	const void *keymaterial;
404 	size_t keysz;
405 	bool seeded;
406 
407 	RANDOM_RESEED_ASSERT_LOCK_OWNED();
408 
409 	seeded = random_fortuna_seeded_internal();
410 	if (seeded) {
411 		randomdev_getkey(&fortuna_state.fs_key, &keymaterial, &keysz);
412 		KASSERT(keysz == RANDOM_KEYSIZE, ("%s: key size %zu not %u",
413 			__func__, keysz, (unsigned)RANDOM_KEYSIZE));
414 	}
415 
416 	/*-
417 	 * FS&K - K = Hd(K|s) where Hd(m) is H(H(0^512|m))
418 	 *      - C = C + 1
419 	 */
420 	randomdev_hash_init(&context);
421 	randomdev_hash_iterate(&context, zero_region, RANDOM_ZERO_BLOCKSIZE);
422 	if (seeded)
423 		randomdev_hash_iterate(&context, keymaterial, keysz);
424 	randomdev_hash_iterate(&context, entropy_data, RANDOM_KEYSIZE*blockcount);
425 	randomdev_hash_finish(&context, hash);
426 	randomdev_hash_init(&context);
427 	randomdev_hash_iterate(&context, hash, RANDOM_KEYSIZE);
428 	randomdev_hash_finish(&context, hash);
429 	randomdev_encrypt_init(&fortuna_state.fs_key, hash);
430 	explicit_bzero(hash, sizeof(hash));
431 	/* Unblock the device if this is the first time we are reseeding. */
432 	if (uint128_is_zero(fortuna_state.fs_counter))
433 		randomdev_unblock();
434 	uint128_increment(&fortuna_state.fs_counter);
435 }
436 
437 /*-
438  * FS&K - RandomData() (Part 1)
439  * Used to return processed entropy from the PRNG. There is a pre_read
440  * required to be present (but it can be a stub) in order to allow
441  * specific actions at the beginning of the read.
442  */
443 void
444 random_fortuna_pre_read(void)
445 {
446 #ifdef _KERNEL
447 	sbintime_t now;
448 #endif
449 	struct randomdev_hash context;
450 	uint32_t s[RANDOM_FORTUNA_NPOOLS*RANDOM_KEYSIZE_WORDS];
451 	uint8_t temp[RANDOM_KEYSIZE];
452 	u_int i;
453 
454 	KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0"));
455 	RANDOM_RESEED_LOCK();
456 #ifdef _KERNEL
457 	/* FS&K - Use 'getsbinuptime()' to prevent reseed-spamming. */
458 	now = getsbinuptime();
459 #endif
460 
461 	if (fortuna_state.fs_pool[0].fsp_length < fortuna_state.fs_minpoolsize
462 #ifdef _KERNEL
463 	    /*
464 	     * FS&K - Use 'getsbinuptime()' to prevent reseed-spamming, but do
465 	     * not block initial seeding (fs_lasttime == 0).
466 	     */
467 	    || (__predict_true(fortuna_state.fs_lasttime != 0) &&
468 		now - fortuna_state.fs_lasttime <= SBT_1S/10)
469 #endif
470 	) {
471 		RANDOM_RESEED_UNLOCK();
472 		return;
473 	}
474 
475 #ifdef _KERNEL
476 	/*
477 	 * When set, pretend we do not have enough entropy to reseed yet.
478 	 */
479 	KFAIL_POINT_CODE(DEBUG_FP, random_fortuna_pre_read, {
480 		if (RETURN_VALUE != 0) {
481 			RANDOM_RESEED_UNLOCK();
482 			return;
483 		}
484 	});
485 #endif
486 
487 #ifdef _KERNEL
488 	fortuna_state.fs_lasttime = now;
489 #endif
490 
491 	/* FS&K - ReseedCNT = ReseedCNT + 1 */
492 	fortuna_state.fs_reseedcount++;
493 	/* s = \epsilon at start */
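	/*
	 * Pool P_i contributes only when 2^i divides ReseedCnt; for example,
	 * reseed number 8 drains P_0..P_3, and the loop below stops at i = 4
	 * because 16 does not divide 8.
	 */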
494 	for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
495 		/* FS&K - if Divides(ReseedCnt, 2^i) ... */
496 		if ((fortuna_state.fs_reseedcount % (1 << i)) == 0) {
497 			/*-
498 			 * FS&K - temp = (P_i)
499 			 *      - P_i = \epsilon
500 			 *      - s = s|H(temp)
501 			 */
502 			randomdev_hash_finish(&fortuna_state.fs_pool[i].fsp_hash, temp);
503 			randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
504 			fortuna_state.fs_pool[i].fsp_length = 0;
505 			randomdev_hash_init(&context);
506 			randomdev_hash_iterate(&context, temp, RANDOM_KEYSIZE);
507 			randomdev_hash_finish(&context, s + i*RANDOM_KEYSIZE_WORDS);
508 		} else
509 			break;
510 	}
511 #ifdef _KERNEL
512 	SDT_PROBE2(random, fortuna, event_processor, debug, fortuna_state.fs_reseedcount, fortuna_state.fs_pool);
513 #endif
514 	/* FS&K */
515 	random_fortuna_reseed_internal(s, i);
516 	RANDOM_RESEED_UNLOCK();
517 
518 	/* Clean up and secure */
519 	explicit_bzero(s, sizeof(s));
520 	explicit_bzero(temp, sizeof(temp));
521 }
522 
523 /*
524  * This is basically GenerateBlocks() from FS&K.
525  *
526  * It differs in three ways:
527  *
528  * 1. Chacha20 is tolerant of non-block-multiple request sizes, so we do not
529  * need to handle any remainder bytes specially and can just pass the length
530  * directly to the PRF construction; and
531  *
532  * 2. Chacha20 is a 512-bit block size cipher (whereas AES has 128-bit block
533  * size, regardless of key size).  This means Chacha does not require re-keying
534  * every 1MiB.  This is implied by the math in FS&K 9.4 and mentioned
535  * explicitly in the conclusion, "If we had a block cipher with a 256-bit [or
536  * greater] block size, then the collisions would not have been an issue at
537  * all" (p. 144).
538  *
539  * 3. In conventional ("locked") mode, we produce a maximum of PAGE_SIZE output
540  * at a time before dropping the lock, to avoid bullying the lock too much.  This
541  * has been the status quo since 2015 (r284959).
542  *
543  * The upstream caller random_fortuna_read is responsible for zeroing out
544  * sensitive buffers provided as parameters to this routine.
545  */
546 enum {
547 	FORTUNA_UNLOCKED = false,
548 	FORTUNA_LOCKED = true
549 };
550 static void
551 random_fortuna_genbytes(uint8_t *buf, size_t bytecount,
552     uint8_t newkey[static RANDOM_KEYSIZE], uint128_t *p_counter,
553     union randomdev_key *p_key, bool locked)
554 {
555 	uint8_t remainder_buf[RANDOM_BLOCKSIZE];
556 	size_t chunk_size;
557 
558 	if (locked)
559 		RANDOM_RESEED_ASSERT_LOCK_OWNED();
560 	else
561 		RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();
562 
563 	/*
564 	 * Easy case: don't have to worry about bullying the global mutex,
565 	 * don't have to worry about rekeying Chacha; API is byte-oriented.
566 	 */
567 	if (!locked && random_chachamode) {
568 		randomdev_keystream(p_key, p_counter, buf, bytecount);
569 		return;
570 	}
571 
572 	if (locked) {
573 		/*
574 		 * While holding the global lock, limit PRF generation to
575 		 * mitigate, but not eliminate, bullying symptoms.
576 		 */
577 		chunk_size = PAGE_SIZE;
578 	} else {
579 		/*
580 		* 128-bit block ciphers like AES must be re-keyed at 1MB
581 		 * 128-bit block ciphers like AES must be re-keyed at 1MB
582 		 * intervals to avoid unacceptable statistical differentiation
583 		 * from true random data (FS&K 9.4, p. 143-144).
584 		 */
585 		chunk_size = RANDOM_FORTUNA_MAX_READ;
586 	}
587 
588 	chunk_size = MIN(bytecount, chunk_size);
589 	if (!random_chachamode)
590 		chunk_size = rounddown(chunk_size, RANDOM_BLOCKSIZE);
591 
592 	while (bytecount >= chunk_size && chunk_size > 0) {
593 		randomdev_keystream(p_key, p_counter, buf, chunk_size);
594 
595 		buf += chunk_size;
596 		bytecount -= chunk_size;
597 
598 		/* We have to rekey if there is any data remaining to be
599 		 * generated, in two scenarios:
600 		 *
601 		 * locked: we need to rekey before we unlock and release the
602 		 * global state to another consumer; or
603 		 *
604 		 * unlocked: we need to rekey because we're in AES mode and are
605 		 * required to rekey at chunk_size==1MB.  But we do not need to
606 		 * rekey during the last trailing <1MB chunk.
607 		 */
608 		if (bytecount > 0) {
609 			if (locked || chunk_size == RANDOM_FORTUNA_MAX_READ) {
610 				randomdev_keystream(p_key, p_counter, newkey,
611 				    RANDOM_KEYSIZE);
612 				randomdev_encrypt_init(p_key, newkey);
613 			}
614 
615 			/*
616 			 * If we're holding the global lock, yield it briefly
617 			 * now.
618 			 */
619 			if (locked) {
620 				RANDOM_RESEED_UNLOCK();
621 				RANDOM_RESEED_LOCK();
622 			}
623 
624 			/*
625 			 * At the trailing end, scale down chunk_size from 1MB or
626 			 * PAGE_SIZE to all remaining full blocks (AES) or all
627 			 * remaining bytes (Chacha).
628 			 */
629 			if (bytecount < chunk_size) {
630 				if (random_chachamode)
631 					chunk_size = bytecount;
632 				else if (bytecount >= RANDOM_BLOCKSIZE)
633 					chunk_size = rounddown(bytecount,
634 					    RANDOM_BLOCKSIZE);
635 				else
636 					break;
637 			}
638 		}
639 	}
640 
641 	/*
642 	 * Generate any partial AES block remaining into a temporary buffer and
643 	 * copy the desired substring out.
644 	 */
645 	if (bytecount > 0) {
646 		MPASS(!random_chachamode);
647 
648 		randomdev_keystream(p_key, p_counter, remainder_buf,
649 		    sizeof(remainder_buf));
650 	}
651 
652 	/*
653 	 * In locked mode, re-key global K before dropping the lock, which we
654 	 * don't need for memcpy/bzero below.
655 	 */
656 	if (locked) {
657 		randomdev_keystream(p_key, p_counter, newkey, RANDOM_KEYSIZE);
658 		randomdev_encrypt_init(p_key, newkey);
659 		RANDOM_RESEED_UNLOCK();
660 	}
661 
662 	if (bytecount > 0) {
663 		memcpy(buf, remainder_buf, bytecount);
664 		explicit_bzero(remainder_buf, sizeof(remainder_buf));
665 	}
666 }
667 
668 
669 /*
670  * Handle only "concurrency-enabled" Fortuna reads to simplify logic.
671  *
672  * Caller (random_fortuna_read) is responsible for zeroing out sensitive
673  * buffers provided as parameters to this routine.
674  */
675 static void
676 random_fortuna_read_concurrent(uint8_t *buf, size_t bytecount,
677     uint8_t newkey[static RANDOM_KEYSIZE])
678 {
679 	union randomdev_key key_copy;
680 	uint128_t counter_copy;
681 	size_t blockcount;
682 
683 	MPASS(fortuna_concurrent_read);
684 
685 	/*
686 	 * Compute number of blocks required for the PRF request ('delta C').
687 	 * We will step the global counter 'C' by this number under lock, and
688 	 * then actually consume the counter values outside the lock.
689 	 *
690 	 * This ensures that contemporaneous but independent requests for
691 	 * randomness receive distinct 'C' values and thus independent PRF
692 	 * results.
693 	 */
694 	if (random_chachamode) {
695 		blockcount = howmany(bytecount, CHACHA_BLOCKLEN);
696 	} else {
697 		blockcount = howmany(bytecount, RANDOM_BLOCKSIZE);
698 
699 		/*
700 		 * Need to account for the additional blocks generated by
701 		 * rekeying when updating the global fs_counter.
702 		 */
703 		blockcount += RANDOM_KEYS_PER_BLOCK *
704 		    (blockcount / RANDOM_FORTUNA_BLOCKS_PER_KEY);
705 	}
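	/*
	 * Worked example (hypothetical request): 4MiB in AES mode comes to
	 * howmany(4MiB, RANDOM_BLOCKSIZE) = 262144 output blocks, plus
	 * RANDOM_KEYS_PER_BLOCK * (262144 / RANDOM_FORTUNA_BLOCKS_PER_KEY)
	 * blocks reserved for the intermediate rekey output; the global 'C'
	 * is stepped by that total below.
	 */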
706 
707 	RANDOM_RESEED_LOCK();
708 	KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));
709 
710 	/*
711 	 * Save the original counter and key values that will be used as the
712 	 * PRF for this particular consumer.
713 	 */
714 	memcpy(&counter_copy, &fortuna_state.fs_counter, sizeof(counter_copy));
715 	memcpy(&key_copy, &fortuna_state.fs_key, sizeof(key_copy));
716 
717 	/*
718 	 * Step the counter as if we had generated 'blockcount' blocks for this
719 	 * consumer.  I.e., ensure that the next consumer gets an independent
720 	 * range of counter values once we drop the global lock.
721 	 */
722 	uint128_add64(&fortuna_state.fs_counter, blockcount);
723 
724 	/*
725 	 * We still need to Rekey the global 'K' between independent calls;
726 	 * this is no different from conventional Fortuna.  Note that
727 	 * 'randomdev_keystream()' will step the fs_counter 'C' appropriately
728 	 * for the blocks needed for the 'newkey'.
729 	 *
730 	 * (This is part of PseudoRandomData() in FS&K, 9.4.4.)
731 	 */
732 	randomdev_keystream(&fortuna_state.fs_key, &fortuna_state.fs_counter,
733 	    newkey, RANDOM_KEYSIZE);
734 	randomdev_encrypt_init(&fortuna_state.fs_key, newkey);
735 
736 	/*
737 	 * We have everything we need to generate a unique PRF for this
738 	 * consumer without touching global state.
739 	 */
740 	RANDOM_RESEED_UNLOCK();
741 
742 	random_fortuna_genbytes(buf, bytecount, newkey, &counter_copy,
743 	    &key_copy, FORTUNA_UNLOCKED);
744 	RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();
745 
746 	explicit_bzero(&counter_copy, sizeof(counter_copy));
747 	explicit_bzero(&key_copy, sizeof(key_copy));
748 }
749 
750 /*-
751  * FS&K - RandomData() (Part 2)
752  * Main read from Fortuna, continued. May be called multiple times after
753  * the random_fortuna_pre_read() above.
754  *
755  * The supplied buf need not be a multiple of RANDOM_BLOCKSIZE in size; it is
756  * the responsibility of the algorithm to accommodate partial block reads, if a
757  * block output mode is used.
758  */
759 void
760 random_fortuna_read(uint8_t *buf, size_t bytecount)
761 {
762 	uint8_t newkey[RANDOM_KEYSIZE];
763 
764 	if (fortuna_concurrent_read) {
765 		random_fortuna_read_concurrent(buf, bytecount, newkey);
766 		goto out;
767 	}
768 
769 	RANDOM_RESEED_LOCK();
770 	KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));
771 
772 	random_fortuna_genbytes(buf, bytecount, newkey,
773 	    &fortuna_state.fs_counter, &fortuna_state.fs_key, FORTUNA_LOCKED);
774 	/* Returns unlocked */
775 	RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();
776 
777 out:
778 	explicit_bzero(newkey, sizeof(newkey));
779 }
780 
781 #ifdef _KERNEL
782 static bool block_seeded_status = false;
783 SYSCTL_BOOL(_kern_random, OID_AUTO, block_seeded_status, CTLFLAG_RWTUN,
784     &block_seeded_status, 0,
785     "If non-zero, pretend Fortuna is in an unseeded state.  By setting "
786     "this as a tunable, boot can be tested as if the random device is "
787     "unavailable.");
788 #endif
789 
790 static bool
791 random_fortuna_seeded_internal(void)
792 {
793 	return (!uint128_is_zero(fortuna_state.fs_counter));
794 }
795 
796 static bool
797 random_fortuna_seeded(void)
798 {
799 
800 #ifdef _KERNEL
801 	if (block_seeded_status)
802 		return (false);
803 #endif
804 
805 	if (__predict_true(random_fortuna_seeded_internal()))
806 		return (true);
807 
808 	/*
809 	 * Maybe we have enough entropy in the zeroth pool but just haven't
810 	 * kicked the initial seed step.  Do so now.
811 	 */
812 	random_fortuna_pre_read();
813 
814 	return (random_fortuna_seeded_internal());
815 }
816