xref: /freebsd/sys/dev/random/fortuna.c (revision f6a3b357e9be4c6423c85eff9a847163a0d307c8)
1 /*-
2  * Copyright (c) 2017 W. Dean Freeman
3  * Copyright (c) 2013-2015 Mark R V Murray
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer
11  *    in this position and unchanged.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  */
28 
29 /*
30  * This implementation of Fortuna is based on the descriptions found in
31  * ISBN 978-0-470-47424-2 "Cryptography Engineering" by Ferguson, Schneier
32  * and Kohno ("FS&K").
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/limits.h>
40 
41 #ifdef _KERNEL
42 #include <sys/fail.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/malloc.h>
46 #include <sys/mutex.h>
47 #include <sys/random.h>
48 #include <sys/sdt.h>
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
51 
52 #include <machine/cpu.h>
53 #else /* !_KERNEL */
54 #include <inttypes.h>
55 #include <stdbool.h>
56 #include <stdio.h>
57 #include <stdlib.h>
58 #include <string.h>
59 #include <threads.h>
60 
61 #include "unit_test.h"
62 #endif /* _KERNEL */
63 
64 #include <crypto/chacha20/chacha.h>
65 #include <crypto/rijndael/rijndael-api-fst.h>
66 #include <crypto/sha2/sha256.h>
67 
68 #include <dev/random/hash.h>
69 #include <dev/random/randomdev.h>
70 #ifdef _KERNEL
71 #include <dev/random/random_harvestq.h>
72 #endif
73 #include <dev/random/uint128.h>
74 #include <dev/random/fortuna.h>
75 
76 /* Defined in FS&K */
77 #define	RANDOM_FORTUNA_NPOOLS 32		/* The number of accumulation pools */
78 #define	RANDOM_FORTUNA_DEFPOOLSIZE 64		/* The default pool size/length for a (re)seed */
79 #define	RANDOM_FORTUNA_MAX_READ (1 << 20)	/* Max bytes from AES before rekeying */
80 #define	RANDOM_FORTUNA_BLOCKS_PER_KEY (1 << 16)	/* Max blocks from AES before rekeying */
/* The two rekeying limits must agree: 2^16 blocks of RANDOM_BLOCKSIZE (16 bytes) == 1MiB. */
81 CTASSERT(RANDOM_FORTUNA_BLOCKS_PER_KEY * RANDOM_BLOCKSIZE ==
82     RANDOM_FORTUNA_MAX_READ);
83 
84 /*
85  * The allowable range of RANDOM_FORTUNA_DEFPOOLSIZE. The default value is above.
86  * Making RANDOM_FORTUNA_DEFPOOLSIZE too large will mean a long time between reseeds,
87  * and too small may compromise initial security but get faster reseeds.
88  */
89 #define	RANDOM_FORTUNA_MINPOOLSIZE 16
90 #define	RANDOM_FORTUNA_MAXPOOLSIZE INT_MAX
91 CTASSERT(RANDOM_FORTUNA_MINPOOLSIZE <= RANDOM_FORTUNA_DEFPOOLSIZE);
92 CTASSERT(RANDOM_FORTUNA_DEFPOOLSIZE <= RANDOM_FORTUNA_MAXPOOLSIZE);
93 
94 /* This algorithm (and code) presumes that RANDOM_KEYSIZE is twice as large as RANDOM_BLOCKSIZE */
95 CTASSERT(RANDOM_BLOCKSIZE == sizeof(uint128_t));
96 CTASSERT(RANDOM_KEYSIZE == 2*RANDOM_BLOCKSIZE);
97 
98 /* Probes for dtrace(1) */
/*
 * Fired from random_fortuna_pre_read() with the current reseed count and
 * the pool array, so reseed scheduling can be observed with dtrace.
 */
99 #ifdef _KERNEL
100 SDT_PROVIDER_DECLARE(random);
101 SDT_PROVIDER_DEFINE(random);
102 SDT_PROBE_DEFINE2(random, fortuna, event_processor, debug, "u_int", "struct fs_pool *");
103 #endif /* _KERNEL */
104 
105 /*
106  * This is the beastie that needs protecting. It contains all of the
107  * state that we are excited about. Exactly one is instantiated.
108  */
109 static struct fortuna_state {
110 	struct fs_pool {		/* P_i */
111 		u_int fsp_length;	/* Only the first one is used by Fortuna */
		/* Incremental hash accumulating events harvested into this pool */
112 		struct randomdev_hash fsp_hash;
113 	} fs_pool[RANDOM_FORTUNA_NPOOLS];
114 	u_int fs_reseedcount;		/* ReseedCnt */
115 	uint128_t fs_counter;		/* C */
116 	union randomdev_key fs_key;	/* K */
	/* Pool-0 reseed threshold, settable via kern.random.fortuna.minpoolsize */
117 	u_int fs_minpoolsize;		/* Extras */
118 	/* Extras for the OS */
119 #ifdef _KERNEL
120 	/* For use when 'pacing' the reseeds */
121 	sbintime_t fs_lasttime;
122 #endif
123 	/* Reseed lock */
124 	mtx_t fs_mtx;
125 } fortuna_state;
126 
127 /*
128  * This knob enables or disables Concurrent Reads.  The plan is to turn it on
129  * by default sometime before 13.0 branches.
130  *
131  * The benefit is improved concurrency in Fortuna.  That is reflected in two
132  * related aspects:
133  *
134  * 1. Concurrent devrandom readers can achieve similar throughput to a single
135  *    reader thread.
136  *
137  * 2. The rand_harvestq process spends much less time spinning when one or more
138  *    readers is processing a large request.  Partially this is due to
139  *    rand_harvestq / ra_event_processor design, which only passes one event at
140  *    a time to the underlying algorithm.  Each time, Fortuna must take its
141  *    global state mutex, potentially blocking on a reader.  Our adaptive
142  *    mutexes assume that a lock holder currently on CPU will release the lock
143  *    quickly, and spin if the owning thread is currently running.
144  *
145  * The concern is that the reduced lock scope might result in a less safe
146  * random(4) design.  However, the reduced-lock scope design is still
147  * fundamentally Fortuna.  This is discussed below.
148  *
149  * Fortuna Read() only needs mutual exclusion between readers to correctly
150  * update the shared read-side state: just C, the 128-bit counter, and K, the
151  * current cipher key.
152  *
153  * In the Fortuna design, the global counter C should provide an independent
154  * range of values per generator (CTR-mode cipher or similar) invocation.
155  *
156  * Under lock, we can save a copy of C on the stack, and increment the global C
157  * by the number of blocks a Read request will require.
158  *
159  * Still under lock, we can save a copy of the key K on the stack, and then
160  * perform the usual key erasure K' <- Keystream(C, K, ...).  This does require
161  * generating 256 bits (32 bytes) of cryptographic keystream output with the
162  * global lock held, but that's all; none of the user keystream generation must
163  * be performed under lock.
164  *
165  * At this point, we may unlock.
166  *
167  * Some example timelines below (to oversimplify, all requests are in units of
168  * native blocks, and the keysize happens to be equal to or less than the native
169  * blocksize of the underlying cipher, and the same sequence of two requests
170  * arrive in the same order).  The possibly expensive consumer keystream
171  * generation portion is marked with '**'.
172  *
173  * Status Quo fortuna_read()           Reduced-scope locking
174  * -------------------------           ---------------------
175  * C=C_0, K=K_0                        C=C_0, K=K_0
176  * <Thr 1 requests N blocks>           <Thr 1 requests N blocks>
177  * 1:Lock()                            1:Lock()
178  * <Thr 2 requests M blocks>           <Thr 2 requests M blocks>
179  * 1:GenBytes()                        1:stack_C := C_0
180  * 1:  Keystream(C_0, K_0, N)          1:stack_K := K_0
181  * 1:    <N blocks generated>**        1:C' := C_0 + N
182  * 1:    C' := C_0 + N                 1:K' := Keystream(C', K_0, 1)
183  * 1:    <- Keystream                  1:  <1 block generated>
184  * 1:  K' := Keystream(C', K_0, 1)     1:  C'' := C' + 1
185  * 1:    <1 block generated>           1:  <- Keystream
186  * 1:    C'' := C' + 1                 1:Unlock()
187  * 1:    <- Keystream
188  * 1:  <- GenBytes()
189  * 1:Unlock()
190  *
191  * Just prior to unlock, shared state is identical:
192  * ------------------------------------------------
193  * C'' == C_0 + N + 1                  C'' == C_0 + N + 1
194  * K' == keystream generated from      K' == keystream generated from
195  *       C_0 + N, K_0.                       C_0 + N, K_0.
196  * K_0 has been erased.                K_0 has been erased.
197  *
198  * After both designs unlock, the 2nd reader is unblocked.
199  *
200  * 2:Lock()                            2:Lock()
201  * 2:GenBytes()                        2:stack_C' := C''
202  * 2:  Keystream(C'', K', M)           2:stack_K' := K'
203  * 2:    <M blocks generated>**        2:C''' := C'' + M
204  * 2:    C''' := C'' + M               2:K'' := Keystream(C''', K', 1)
205  * 2:    <- Keystream                  2:  <1 block generated>
206  * 2:  K'' := Keystream(C''', K', 1)   2:  C'''' := C''' + 1
207  * 2:    <1 block generated>           2:  <- Keystream
208  * 2:    C'''' := C''' + 1             2:Unlock()
209  * 2:    <- Keystream
210  * 2:  <- GenBytes()
211  * 2:Unlock()
212  *
213  * Just prior to unlock, shared state is still identical:
214  * ------------------------------------------------------
215  *
216  * C'''' == (C_0 + N + 1) + M + 1      C'''' == (C_0 + N + 1) + M + 1
217  * K'' == keystream generated from     K'' == keystream generated from
218  *        C_0 + N + 1 + M, K'.                C_0 + N + 1 + M, K'.
219  * K' has been erased.                 K' has been erased.
220  *
221  * Finally, in the new design, the two consumer threads can finish the
222  * remainder of the generation at any time (including simultaneously):
223  *
224  *                                     1:  GenBytes()
225  *                                     1:    Keystream(stack_C, stack_K, N)
226  *                                     1:      <N blocks generated>**
227  *                                     1:    <- Keystream
228  *                                     1:  <- GenBytes
229  *                                     1:ExplicitBzero(stack_C, stack_K)
230  *
231  *                                     2:  GenBytes()
232  *                                     2:    Keystream(stack_C', stack_K', M)
233  *                                     2:      <M blocks generated>**
234  *                                     2:    <- Keystream
235  *                                     2:  <- GenBytes
236  *                                     2:ExplicitBzero(stack_C', stack_K')
237  *
238  * The generated user keystream for both threads is identical between the two
239  * implementations:
240  *
241  * 1: Keystream(C_0, K_0, N)           1: Keystream(stack_C, stack_K, N)
242  * 2: Keystream(C'', K', M)            2: Keystream(stack_C', stack_K', M)
243  *
244  * (stack_C == C_0; stack_K == K_0; stack_C' == C''; stack_K' == K'.)
245  */
246 static bool fortuna_concurrent_read __read_frequently = false;
247 
248 #ifdef _KERNEL
/* sysctl context for the nodes registered in random_fortuna_init_alg(). */
249 static struct sysctl_ctx_list random_clist;
/* Generates random_check_uint_fs_minpoolsize() used by the sysctl handler. */
250 RANDOM_CHECK_UINT(fs_minpoolsize, RANDOM_FORTUNA_MINPOOLSIZE, RANDOM_FORTUNA_MAXPOOLSIZE);
251 #else
/*
 * Userland unit-test builds supply their own block of zeros, hashed into
 * each reseed (see random_fortuna_reseed_internal()); in-kernel builds
 * presumably get zero_region from elsewhere — TODO confirm.
 */
252 static uint8_t zero_region[RANDOM_ZERO_BLOCKSIZE];
253 #endif
254 
255 static void random_fortuna_pre_read(void);
256 static void random_fortuna_read(uint8_t *, size_t);
257 static bool random_fortuna_seeded(void);
258 static bool random_fortuna_seeded_internal(void);
259 static void random_fortuna_process_event(struct harvest_event *);
260 static void random_fortuna_init_alg(void *);
261 static void random_fortuna_deinit_alg(void *);
262 
263 static void random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount);
264 
/*
 * Dispatch table binding this file's Fortuna implementation to the rest of
 * the random device framework (non-static; resolved by callers elsewhere).
 */
265 struct random_algorithm random_alg_context = {
266 	.ra_ident = "Fortuna",
267 	.ra_init_alg = random_fortuna_init_alg,
268 	.ra_deinit_alg = random_fortuna_deinit_alg,
269 	.ra_pre_read = random_fortuna_pre_read,
270 	.ra_read = random_fortuna_read,
271 	.ra_seeded = random_fortuna_seeded,
272 	.ra_event_processor = random_fortuna_process_event,
273 	.ra_poolcount = RANDOM_FORTUNA_NPOOLS,
274 };
275 
276 /* ARGSUSED */
/*
 * One-time setup, invoked via random_alg_context.ra_init_alg: create the
 * reseed lock, register the kern.random.fortuna.* sysctl knobs (kernel
 * only), and zero the FS&K generator state.
 */
277 static void
278 random_fortuna_init_alg(void *unused __unused)
279 {
280 	int i;
281 #ifdef _KERNEL
282 	struct sysctl_oid *random_fortuna_o;
283 #endif
284 
285 	RANDOM_RESEED_INIT_LOCK();
286 	/*
287 	 * Fortuna parameters. Do not adjust these unless you
288 	 * have a very good clue about what they do!
289 	 */
290 	fortuna_state.fs_minpoolsize = RANDOM_FORTUNA_DEFPOOLSIZE;
291 #ifdef _KERNEL
292 	fortuna_state.fs_lasttime = 0;
293 	random_fortuna_o = SYSCTL_ADD_NODE(&random_clist,
294 		SYSCTL_STATIC_CHILDREN(_kern_random),
295 		OID_AUTO, "fortuna", CTLFLAG_RW, 0,
296 		"Fortuna Parameters");
	/* minpoolsize is validated by random_check_uint_fs_minpoolsize(). */
297 	SYSCTL_ADD_PROC(&random_clist,
298 		SYSCTL_CHILDREN(random_fortuna_o), OID_AUTO,
299 		"minpoolsize", CTLTYPE_UINT | CTLFLAG_RWTUN,
300 		&fortuna_state.fs_minpoolsize, RANDOM_FORTUNA_DEFPOOLSIZE,
301 		random_check_uint_fs_minpoolsize, "IU",
302 		"Minimum pool size necessary to cause a reseed");
303 	KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0 at startup"));
304 
	/* Read-only at runtime (RDTUN): concurrent reads can only be enabled at boot. */
305 	SYSCTL_ADD_BOOL(&random_clist, SYSCTL_CHILDREN(random_fortuna_o),
306 	    OID_AUTO, "concurrent_read", CTLFLAG_RDTUN,
307 	    &fortuna_concurrent_read, 0, "If non-zero, enable "
308 	    "feature to improve concurrent Fortuna performance.");
309 #endif
310 
311 	/*-
312 	 * FS&K - InitializePRNG()
313 	 *      - P_i = \epsilon
314 	 *      - ReseedCNT = 0
315 	 */
316 	for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
317 		randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
318 		fortuna_state.fs_pool[i].fsp_length = 0;
319 	}
320 	fortuna_state.fs_reseedcount = 0;
321 	/*-
322 	 * FS&K - InitializeGenerator()
323 	 *      - C = 0
324 	 *      - K = 0
325 	 */
326 	fortuna_state.fs_counter = UINT128_ZERO;
327 	explicit_bzero(&fortuna_state.fs_key, sizeof(fortuna_state.fs_key));
328 }
329 
330 /* ARGSUSED */
/*
 * Teardown, invoked via random_alg_context.ra_deinit_alg: destroy the
 * reseed lock, scrub all generator state (pools, C, K — the bzero also
 * covers the embedded, already-deinitialized mutex), and free the sysctl
 * tree registered at init.
 */
331 static void
332 random_fortuna_deinit_alg(void *unused __unused)
333 {
334 
335 	RANDOM_RESEED_DEINIT_LOCK();
336 	explicit_bzero(&fortuna_state, sizeof(fortuna_state));
337 #ifdef _KERNEL
338 	sysctl_ctx_free(&random_clist);
339 #endif
340 }
341 
342 /*-
343  * FS&K - AddRandomEvent()
344  * Process a single stochastic event off the harvest queue
345  */
346 static void
347 random_fortuna_process_event(struct harvest_event *event)
348 {
349 	u_int pl;
350 
351 	RANDOM_RESEED_LOCK();
352 	/*-
353 	 * FS&K - P_i = P_i|<harvested stuff>
354 	 * Accumulate the event into the appropriate pool
355 	 * where each event carries the destination information.
356 	 *
357 	 * The hash_init() and hash_finish() calls are done in
358 	 * random_fortuna_pre_read().
359 	 *
360 	 * We must be locked against pool state modification which can happen
361 	 * during accumulation/reseeding and reading/regating.
362 	 */
363 	pl = event->he_destination % RANDOM_FORTUNA_NPOOLS;
364 	/*
365 	 * We ignore low entropy static/counter fields towards the end of the
366 	 * he_event structure in order to increase measurable entropy when
367 	 * conducting SP800-90B entropy analysis measurements of seed material
368 	 * fed into PRNG.
369 	 * -- wdf
370 	 */
371 	KASSERT(event->he_size <= sizeof(event->he_entropy),
372 	    ("%s: event->he_size: %hhu > sizeof(event->he_entropy): %zu\n",
373 	    __func__, event->he_size, sizeof(event->he_entropy)));
	/* Hash in the event timestamp/counter first, then the entropy payload. */
374 	randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
375 	    &event->he_somecounter, sizeof(event->he_somecounter));
376 	randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
377 	    event->he_entropy, event->he_size);
378 
379 	/*-
380 	 * Don't wrap the length.  This is a "saturating" add.
381 	 * XXX: FIX!!: We don't actually need lengths for anything but fs_pool[0],
382 	 * but it's been useful debugging to see them all.
383 	 */
384 	fortuna_state.fs_pool[pl].fsp_length = MIN(RANDOM_FORTUNA_MAXPOOLSIZE,
385 	    fortuna_state.fs_pool[pl].fsp_length +
386 	    sizeof(event->he_somecounter) + event->he_size);
387 	RANDOM_RESEED_UNLOCK();
388 }
389 
390 /*-
391  * FS&K - Reseed()
392  * This introduces new key material into the output generator.
393  * Additionally it increments the output generator's counter
394  * variable C. When C > 0, the output generator is seeded and
395  * will deliver output.
396  * The entropy_data buffer passed is a very specific size; the
397  * product of RANDOM_FORTUNA_NPOOLS and RANDOM_KEYSIZE.
398  */
/*
 * 'blockcount' is the number of RANDOM_KEYSIZE-sized chunks of
 * 'entropy_data' to absorb (the pool hashes produced by
 * random_fortuna_pre_read()).  The caller must hold the reseed lock.
 */
399 static void
400 random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount)
401 {
402 	struct randomdev_hash context;
403 	uint8_t hash[RANDOM_KEYSIZE];
404 	const void *keymaterial;
405 	size_t keysz;
406 	bool seeded;
407 
408 	RANDOM_RESEED_ASSERT_LOCK_OWNED();
409 
	/* Only mix the previous key K into the new one once we have been seeded. */
410 	seeded = random_fortuna_seeded_internal();
411 	if (seeded) {
412 		randomdev_getkey(&fortuna_state.fs_key, &keymaterial, &keysz);
413 		KASSERT(keysz == RANDOM_KEYSIZE, ("%s: key size %zu not %u",
414 			__func__, keysz, (unsigned)RANDOM_KEYSIZE));
415 	}
416 
417 	/*-
418 	 * FS&K - K = Hd(K|s) where Hd(m) is H(H(0^512|m))
419 	 *      - C = C + 1
420 	 */
421 	randomdev_hash_init(&context);
422 	randomdev_hash_iterate(&context, zero_region, RANDOM_ZERO_BLOCKSIZE);
423 	if (seeded)
424 		randomdev_hash_iterate(&context, keymaterial, keysz);
425 	randomdev_hash_iterate(&context, entropy_data, RANDOM_KEYSIZE*blockcount);
426 	randomdev_hash_finish(&context, hash);
	/* Second hash pass completes Hd(); the digest becomes the new key. */
427 	randomdev_hash_init(&context);
428 	randomdev_hash_iterate(&context, hash, RANDOM_KEYSIZE);
429 	randomdev_hash_finish(&context, hash);
430 	randomdev_encrypt_init(&fortuna_state.fs_key, hash);
	/* Scrub the stack digest; it is equivalent to the new key material. */
431 	explicit_bzero(hash, sizeof(hash));
432 	/* Unblock the device if this is the first time we are reseeding. */
433 	if (uint128_is_zero(fortuna_state.fs_counter))
434 		randomdev_unblock();
435 	uint128_increment(&fortuna_state.fs_counter);
436 }
437 
438 /*-
439  * FS&K - RandomData() (Part 1)
440  * Used to return processed entropy from the PRNG. There is a pre_read
441  * required to be present (but it can be a stub) in order to allow
442  * specific actions at the begin of the read.
443  */
/*
 * NOTE(review): forward-declared 'static' above; the definition omits the
 * qualifier.  Linkage remains internal under C rules, but repeating
 * 'static' here would be more consistent.
 */
444 void
445 random_fortuna_pre_read(void)
446 {
447 #ifdef _KERNEL
448 	sbintime_t now;
449 #endif
450 	struct randomdev_hash context;
451 	uint32_t s[RANDOM_FORTUNA_NPOOLS*RANDOM_KEYSIZE_WORDS];
452 	uint8_t temp[RANDOM_KEYSIZE];
453 	u_int i;
454 
455 	KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0"));
456 	RANDOM_RESEED_LOCK();
457 #ifdef _KERNEL
458 	/* FS&K - Use 'getsbinuptime()' to prevent reseed-spamming. */
459 	now = getsbinuptime();
460 #endif
461 
	/* Bail unless pool 0 has enough material and we are not reseeding too often. */
462 	if (fortuna_state.fs_pool[0].fsp_length < fortuna_state.fs_minpoolsize
463 #ifdef _KERNEL
464 	    /*
465 	     * FS&K - Use 'getsbinuptime()' to prevent reseed-spamming, but do
466 	     * not block initial seeding (fs_lasttime == 0).
467 	     */
468 	    || (__predict_true(fortuna_state.fs_lasttime != 0) &&
469 		now - fortuna_state.fs_lasttime <= SBT_1S/10)
470 #endif
471 	) {
472 		RANDOM_RESEED_UNLOCK();
473 		return;
474 	}
475 
476 #ifdef _KERNEL
477 	/*
478 	 * When set, pretend we do not have enough entropy to reseed yet.
479 	 */
480 	KFAIL_POINT_CODE(DEBUG_FP, random_fortuna_pre_read, {
481 		if (RETURN_VALUE != 0) {
482 			RANDOM_RESEED_UNLOCK();
483 			return;
484 		}
485 	});
486 #endif
487 
488 #ifdef _KERNEL
489 	fortuna_state.fs_lasttime = now;
490 #endif
491 
492 	/* FS&K - ReseedCNT = ReseedCNT + 1 */
493 	fortuna_state.fs_reseedcount++;
494 	/* s = \epsilon at start */
495 	for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
496 		/* FS&K - if Divides(ReseedCnt, 2^i) ... */
497 		if ((fortuna_state.fs_reseedcount % (1 << i)) == 0) {
498 			/*-
499 			    * FS&K - temp = (P_i)
500 			    *      - P_i = \epsilon
501 			    *      - s = s|H(temp)
502 			    */
503 			randomdev_hash_finish(&fortuna_state.fs_pool[i].fsp_hash, temp);
504 			randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
505 			fortuna_state.fs_pool[i].fsp_length = 0;
506 			randomdev_hash_init(&context);
507 			randomdev_hash_iterate(&context, temp, RANDOM_KEYSIZE);
508 			randomdev_hash_finish(&context, s + i*RANDOM_KEYSIZE_WORDS);
509 		} else
510 			break;
511 	}
512 #ifdef _KERNEL
513 	SDT_PROBE2(random, fortuna, event_processor, debug, fortuna_state.fs_reseedcount, fortuna_state.fs_pool);
514 #endif
	/* 'i' is the number of pools hashed above; it sizes the reseed input 's'. */
515 	/* FS&K */
516 	random_fortuna_reseed_internal(s, i);
517 	RANDOM_RESEED_UNLOCK();
518 
519 	/* Clean up and secure */
520 	explicit_bzero(s, sizeof(s));
521 	explicit_bzero(temp, sizeof(temp));
522 }
523 
524 /*
525  * This is basically GenerateBlocks() from FS&K.
526  *
527  * It differs in three ways:
528  *
529  * 1. Chacha20 is tolerant of non-block-multiple request sizes, so we do not
530  * need to handle any remainder bytes specially and can just pass the length
531  * directly to the PRF construction; and
532  *
533  * 2. Chacha20 is a 512-bit block size cipher (whereas AES has 128-bit block
534  * size, regardless of key size).  This means Chacha does not require re-keying
535  * every 1MiB.  This is implied by the math in FS&K 9.4 and mentioned
536  * explicitly in the conclusion, "If we had a block cipher with a 256-bit [or
537  * greater] block size, then the collisions would not have been an issue at
538  * all" (p. 144).
539  *
540  * 3. In conventional ("locked") mode, we produce a maximum of PAGE_SIZE output
541  * at a time before dropping the lock, to not bully the lock especially.  This
542  * has been the status quo since 2015 (r284959).
543  *
544  * The upstream caller random_fortuna_read is responsible for zeroing out
545  * sensitive buffers provided as parameters to this routine.
546  */
547 enum {
548 	FORTUNA_UNLOCKED = false,
549 	FORTUNA_LOCKED = true
550 };
/*
 * NOTE: in FORTUNA_LOCKED mode the caller must enter with the reseed lock
 * held, and this routine ALWAYS returns with the lock released (after
 * re-keying the global K).  'p_counter'/'p_key' point either at the global
 * state (locked mode) or at the caller's stack copies (unlocked mode).
 */
551 static void
552 random_fortuna_genbytes(uint8_t *buf, size_t bytecount,
553     uint8_t newkey[static RANDOM_KEYSIZE], uint128_t *p_counter,
554     union randomdev_key *p_key, bool locked)
555 {
556 	uint8_t remainder_buf[RANDOM_BLOCKSIZE];
557 	size_t chunk_size;
558 
559 	if (locked)
560 		RANDOM_RESEED_ASSERT_LOCK_OWNED();
561 	else
562 		RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();
563 
564 	/*
565 	 * Easy case: don't have to worry about bullying the global mutex,
566 	 * don't have to worry about rekeying Chacha; API is byte-oriented.
567 	 */
568 	if (!locked && random_chachamode) {
569 		randomdev_keystream(p_key, p_counter, buf, bytecount);
570 		return;
571 	}
572 
573 	if (locked) {
574 		/*
575 		 * While holding the global lock, limit PRF generation to
576 		 * mitigate, but not eliminate, bullying symptoms.
577 		 */
578 		chunk_size = PAGE_SIZE;
579 	} else {
580 		/*
581 		* 128-bit block ciphers like AES must be re-keyed at 1MB
582 		* intervals to avoid unacceptable statistical differentiation
583 		* from true random data (FS&K 9.4, p. 143-144).
584 		*/
585 		MPASS(!random_chachamode);
586 		chunk_size = RANDOM_FORTUNA_MAX_READ;
587 	}
588 
	/* In AES mode only whole blocks are generated here; see remainder below. */
589 	chunk_size = MIN(bytecount, chunk_size);
590 	if (!random_chachamode)
591 		chunk_size = rounddown(chunk_size, RANDOM_BLOCKSIZE);
592 
593 	while (bytecount >= chunk_size && chunk_size > 0) {
594 		randomdev_keystream(p_key, p_counter, buf, chunk_size);
595 
596 		buf += chunk_size;
597 		bytecount -= chunk_size;
598 
599 		/* We have to rekey if there is any data remaining to be
600 		 * generated, in two scenarios:
601 		 *
602 		 * locked: we need to rekey before we unlock and release the
603 		 * global state to another consumer; or
604 		 *
605 		 * unlocked: we need to rekey because we're in AES mode and are
606 		 * required to rekey at chunk_size==1MB.  But we do not need to
607 		 * rekey during the last trailing <1MB chunk.
608 		 */
609 		if (bytecount > 0) {
610 			if (locked || chunk_size == RANDOM_FORTUNA_MAX_READ) {
611 				randomdev_keystream(p_key, p_counter, newkey,
612 				    RANDOM_KEYSIZE);
613 				randomdev_encrypt_init(p_key, newkey);
614 			}
615 
616 			/*
617 			 * If we're holding the global lock, yield it briefly
618 			 * now.
619 			 */
620 			if (locked) {
621 				RANDOM_RESEED_UNLOCK();
622 				RANDOM_RESEED_LOCK();
623 			}
624 
625 			/*
626 			 * At the trailing end, scale down chunk_size from 1MB or
627 			 * PAGE_SIZE to all remaining full blocks (AES) or all
628 			 * remaining bytes (Chacha).
629 			 */
630 			if (bytecount < chunk_size) {
631 				if (random_chachamode)
632 					chunk_size = bytecount;
633 				else if (bytecount >= RANDOM_BLOCKSIZE)
634 					chunk_size = rounddown(bytecount,
635 					    RANDOM_BLOCKSIZE);
636 				else
637 					break;
638 			}
639 		}
640 	}
641 
642 	/*
643 	 * Generate any partial AES block remaining into a temporary buffer and
644 	 * copy the desired substring out.
645 	 */
646 	if (bytecount > 0) {
647 		MPASS(!random_chachamode);
648 
649 		randomdev_keystream(p_key, p_counter, remainder_buf,
650 		    sizeof(remainder_buf));
651 	}
652 
653 	/*
654 	 * In locked mode, re-key global K before dropping the lock, which we
655 	 * don't need for memcpy/bzero below.
656 	 */
657 	if (locked) {
658 		randomdev_keystream(p_key, p_counter, newkey, RANDOM_KEYSIZE);
659 		randomdev_encrypt_init(p_key, newkey);
660 		RANDOM_RESEED_UNLOCK();
661 	}
662 
663 	if (bytecount > 0) {
664 		memcpy(buf, remainder_buf, bytecount);
665 		explicit_bzero(remainder_buf, sizeof(remainder_buf));
666 	}
667 }
668 
669 
670 /*
671  * Handle only "concurrency-enabled" Fortuna reads to simplify logic.
672  *
673  * Caller (random_fortuna_read) is responsible for zeroing out sensitive
674  * buffers provided as parameters to this routine.
675  */
/*
 * Under the global lock: snapshot (C, K), step the global C past this
 * request's range, and re-key the global K.  The expensive keystream
 * generation then runs on the stack copies with the lock dropped.  See the
 * long design comment above fortuna_concurrent_read.
 */
676 static void
677 random_fortuna_read_concurrent(uint8_t *buf, size_t bytecount,
678     uint8_t newkey[static RANDOM_KEYSIZE])
679 {
680 	union randomdev_key key_copy;
681 	uint128_t counter_copy;
682 	size_t blockcount;
683 
684 	MPASS(fortuna_concurrent_read);
685 
686 	/*
687 	 * Compute number of blocks required for the PRF request ('delta C').
688 	 * We will step the global counter 'C' by this number under lock, and
689 	 * then actually consume the counter values outside the lock.
690 	 *
691 	 * This ensures that contemporaneous but independent requests for
692 	 * randomness receive distinct 'C' values and thus independent PRF
693 	 * results.
694 	 */
695 	if (random_chachamode) {
696 		blockcount = howmany(bytecount, CHACHA_BLOCKLEN);
697 	} else {
698 		blockcount = howmany(bytecount, RANDOM_BLOCKSIZE);
699 
700 		/*
701 		 * Need to account for the additional blocks generated by
702 		 * rekeying when updating the global fs_counter.
703 		 */
704 		blockcount += RANDOM_KEYS_PER_BLOCK *
705 		    (blockcount / RANDOM_FORTUNA_BLOCKS_PER_KEY);
706 	}
707 
708 	RANDOM_RESEED_LOCK();
709 	KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));
710 
711 	/*
712 	 * Save the original counter and key values that will be used as the
713 	 * PRF for this particular consumer.
714 	 */
715 	memcpy(&counter_copy, &fortuna_state.fs_counter, sizeof(counter_copy));
716 	memcpy(&key_copy, &fortuna_state.fs_key, sizeof(key_copy));
717 
718 	/*
719 	 * Step the counter as if we had generated 'bytecount' blocks for this
720 	 * consumer.  I.e., ensure that the next consumer gets an independent
721 	 * range of counter values once we drop the global lock.
722 	 */
723 	uint128_add64(&fortuna_state.fs_counter, blockcount);
724 
725 	/*
726 	 * We still need to Rekey the global 'K' between independent calls;
727 	 * this is no different from conventional Fortuna.  Note that
728 	 * 'randomdev_keystream()' will step the fs_counter 'C' appropriately
729 	 * for the blocks needed for the 'newkey'.
730 	 *
731 	 * (This is part of PseudoRandomData() in FS&K, 9.4.4.)
732 	 */
733 	randomdev_keystream(&fortuna_state.fs_key, &fortuna_state.fs_counter,
734 	    newkey, RANDOM_KEYSIZE);
735 	randomdev_encrypt_init(&fortuna_state.fs_key, newkey);
736 
737 	/*
738 	 * We have everything we need to generate a unique PRF for this
739 	 * consumer without touching global state.
740 	 */
741 	RANDOM_RESEED_UNLOCK();
742 
743 	random_fortuna_genbytes(buf, bytecount, newkey, &counter_copy,
744 	    &key_copy, FORTUNA_UNLOCKED);
745 	RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();
746 
	/* Scrub the stack copies of C and K once keystream generation is done. */
747 	explicit_bzero(&counter_copy, sizeof(counter_copy));
748 	explicit_bzero(&key_copy, sizeof(key_copy));
749 }
750 
751 /*-
752  * FS&K - RandomData() (Part 2)
753  * Main read from Fortuna, continued. May be called multiple times after
754  * the random_fortuna_pre_read() above.
755  *
756  * The supplied buf MAY not be a multiple of RANDOM_BLOCKSIZE in size; it is
757  * the responsibility of the algorithm to accommodate partial block reads, if a
758  * block output mode is used.
759  */
/*
 * NOTE(review): forward-declared 'static' above; the definition omits the
 * qualifier (linkage stays internal).
 */
760 void
761 random_fortuna_read(uint8_t *buf, size_t bytecount)
762 {
763 	uint8_t newkey[RANDOM_KEYSIZE];
764 
765 	if (fortuna_concurrent_read) {
766 		random_fortuna_read_concurrent(buf, bytecount, newkey);
767 		goto out;
768 	}
769 
770 	RANDOM_RESEED_LOCK();
771 	KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));
772 
	/* random_fortuna_genbytes() drops the reseed lock before returning. */
773 	random_fortuna_genbytes(buf, bytecount, newkey,
774 	    &fortuna_state.fs_counter, &fortuna_state.fs_key, FORTUNA_LOCKED);
775 	/* Returns unlocked */
776 	RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();
777 
778 out:
	/* newkey briefly held fresh key material; scrub it on all paths. */
779 	explicit_bzero(newkey, sizeof(newkey));
780 }
781 
782 #ifdef _KERNEL
/*
 * Debug knob (also a boot tunable): when set, random_fortuna_seeded()
 * reports the generator unseeded regardless of actual state.
 */
783 static bool block_seeded_status = false;
784 SYSCTL_BOOL(_kern_random, OID_AUTO, block_seeded_status, CTLFLAG_RWTUN,
785     &block_seeded_status, 0,
786     "If non-zero, pretend Fortuna is in an unseeded state.  By setting "
787     "this as a tunable, boot can be tested as if the random device is "
788     "unavailable.");
789 #endif
790 
791 static bool
792 random_fortuna_seeded_internal(void)
793 {
794 	return (!uint128_is_zero(fortuna_state.fs_counter));
795 }
796 
797 static bool
798 random_fortuna_seeded(void)
799 {
800 
801 #ifdef _KERNEL
802 	if (block_seeded_status)
803 		return (false);
804 #endif
805 
806 	if (__predict_true(random_fortuna_seeded_internal()))
807 		return (true);
808 
809 	/*
810 	 * Maybe we have enough entropy in the zeroth pool but just haven't
811 	 * kicked the initial seed step.  Do so now.
812 	 */
813 	random_fortuna_pre_read();
814 
815 	return (random_fortuna_seeded_internal());
816 }
817