/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>
#include <machine/smp.h>

#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>

#include <dev/random/fenestrasX/fx_brng.h>
#include <dev/random/fenestrasX/fx_hash.h>
#include <dev/random/fenestrasX/fx_pool.h>
#include <dev/random/fenestrasX/fx_priv.h>
#include <dev/random/fenestrasX/fx_pub.h>

/*
 * Timer-based reseed interval growth factor and limit in seconds. (§ 3.2)
 */
#define	FXENT_RESEED_INTVL_GFACT	3
#define	FXENT_RESEED_INTVL_MAX		3600

/*
 * Pool reseed schedule.  Initially, only pool 0 is active.  Until the timer
 * interval reaches INTVL_MAX, only pool 0 is used.
 *
 * After reaching INTVL_MAX, pool k is either activated (if inactive) or used
 * (if active) every 3^k timer reseeds.  (§ 3.3)
 *
 * (Entropy harvesting only round robins across active pools.)
 */
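/*
 * For example, with a base of 3: once the reseed interval has saturated,
 * pool 1 takes part in every 3rd timer reseed, pool 2 in every 9th, and in
 * general pool k in every 3^k-th.
 */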
#define	FXENT_RESEED_BASE		3

/*
 * Number of bytes from high quality sources to allocate to pool 0 before
 * normal round-robin allocation after each timer reseed. (§ 3.4)
 */
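/*
 * 32 bytes is 256 bits, i.e., one key's worth of material; cf. the 256-bit
 * re-key described in fxrng_event_processor() below.
 */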
#define	FXENT_HI_SRC_POOL0_BYTES	32

/*
 * § 3.1
 *
 * Low sources provide unconditioned entropy, such as mouse movements; high
 * sources are assumed to provide high-quality random bytes.  Pull sources are
 * those which can be polled, i.e., anything randomdev calls a "random_source."
 *
 * In the whitepaper, low sources are pull.  For us, at least in the existing
 * design, low-quality sources push into some global ring buffer and then get
 * forwarded into the RNG by a thread that continually polls.  Presumably their
 * design batches low-entropy signals in some way (SHA512?) and only requests
 * them dynamically on reseed.  I'm not sure what the benefit is vs. feeding
 * into the pools directly.
 */
enum fxrng_ent_access_cls {
	FXRNG_PUSH,
	FXRNG_PULL,
};
enum fxrng_ent_source_cls {
	FXRNG_HI,
	FXRNG_LO,
	FXRNG_GARBAGE,
};
struct fxrng_ent_cls {
	enum fxrng_ent_access_cls	entc_axx_cls;
	enum fxrng_ent_source_cls	entc_src_cls;
};

static const struct fxrng_ent_cls fxrng_hi_pull = {
	.entc_axx_cls = FXRNG_PULL,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_hi_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_lo_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_LO,
};
static const struct fxrng_ent_cls fxrng_garbage = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_GARBAGE,
};

/*
 * This table is a mapping of randomdev's current source abstractions to the
 * designations above; at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 */
static const struct fxrng_ent_char {
	const struct fxrng_ent_cls	*entc_cls;
} fxrng_ent_char[ENTROPYSOURCE] = {
	[RANDOM_CACHED] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_ATTACH] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_KEYBOARD] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_MOUSE] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_TUN] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_ETHER] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_NG] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_INTERRUPT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_SWI] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_FS_ATIME] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_UMA] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_CALLOUT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_RANDOMDEV] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_PURE_SAFE] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_GLXSB] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_RDRAND] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RDSEED] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_NEHEMIAH] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RNDTEST] = {
		.entc_cls = &fxrng_garbage,
	},
	[RANDOM_PURE_VIRTIO] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_BROADCOM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_CCP] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_DARN] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_TPM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_VMGENID] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_QUALCOMM] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_ARMV8] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_ARM_TRNG] = {
		.entc_cls = &fxrng_hi_pull,
	},
};

/* Useful for single-bit-per-source state. */
BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE);

/* XXX Borrowed from not-yet-committed D22702. */
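/*
 * Atomically set bit 'n' in bitset 'p' and report whether it was already set
 * (with acquire ordering), so only the first caller to touch a given source
 * observes 'false'.
 */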
#ifndef BIT_TEST_SET_ATOMIC_ACQ
#define	BIT_TEST_SET_ATOMIC_ACQ(_s, n, p)	\
	(atomic_testandset_acq_long(		\
	    &(p)->__bits[__bitset_word((_s), (n))], (n)) != 0)
#endif
#define	FXENT_TEST_SET_ATOMIC_ACQ(n, p) \
	BIT_TEST_SET_ATOMIC_ACQ(ENTROPYSOURCE, n, p)

/* For special behavior on first-time entropy sources. (§ 3.1) */
static struct fxrng_bits __read_mostly fxrng_seen;

/* For special behavior for high-entropy sources after a reseed. (§ 3.4) */
_Static_assert(FXENT_HI_SRC_POOL0_BYTES <= UINT8_MAX, "");
static uint8_t __read_mostly fxrng_reseed_seen[ENTROPYSOURCE];

/* Entropy pools.  Lock order is ENT -> RNG(root) -> RNG(leaf). */
static struct mtx fxent_pool_lk;
MTX_SYSINIT(fx_pool, &fxent_pool_lk, "fx entropy pool lock", MTX_DEF);
#define	FXENT_LOCK()		mtx_lock(&fxent_pool_lk)
#define	FXENT_UNLOCK()		mtx_unlock(&fxent_pool_lk)
#define	FXENT_ASSERT(rng)	mtx_assert(&fxent_pool_lk, MA_OWNED)
#define	FXENT_ASSERT_NOT(rng)	mtx_assert(&fxent_pool_lk, MA_NOTOWNED)
static struct fxrng_hash fxent_pool[FXRNG_NPOOLS];
static unsigned __read_mostly fxent_nactpools = 1;
static struct timeout_task fxent_reseed_timer;
static int __read_mostly fxent_timer_ready;

/*
 * Track number of bytes of entropy harvested from high-quality sources prior
 * to initial keying.  The idea is to collect more jitter entropy when fewer
 * high-quality bytes were available and less if we had other good sources.  We
 * want to provide always-on availability but don't necessarily have *any*
 * great sources on some platforms.
 *
 * Like fxrng_ent_char: at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 *
 * Jitter entropy is unimplemented for now.
 */
static unsigned long fxrng_preseed_ent;

void
fxrng_pools_init(void)
{
	size_t i;

	for (i = 0; i < nitems(fxent_pool); i++)
		fxrng_hash_init(&fxent_pool[i]);
}

static inline bool
fxrng_hi_source(enum random_entropy_source src)
{
	return (fxrng_ent_char[src].entc_cls->entc_src_cls == FXRNG_HI);
}

/*
 * A racy check that this high-entropy source's event should contribute to
 * pool0 on the basis of per-source byte count.  The check is racy for two
 * reasons:
 *   - Performance: The vast majority of the time, we've already taken 32 bytes
 *     from any present high quality source and the racy check lets us avoid
 *     dirtying the cache for the global array.
 *   - Correctness: It's fine that the check is racy.  The failure modes are:
 *     • False positive: We will detect when we take the lock.
 *     • False negative: We still collect the entropy; it just won't be
 *       preferentially placed in pool0 in this case.
 */
static inline bool
fxrng_hi_pool0_eligible_racy(enum random_entropy_source src)
{
	return (atomic_load_acq_8(&fxrng_reseed_seen[src]) <
	    FXENT_HI_SRC_POOL0_BYTES);
}

/*
 * Top level entropy processing API from randomdev.
 *
 * Invoked by the core randomdev subsystem for preload entropy, for "push"
 * sources (like interrupts, keyboard, etc.), and for pull sources (RDRAND,
 * etc.).
 */
void
fxrng_event_processor(struct harvest_event *event)
{
	enum random_entropy_source src;
	unsigned pool;
	bool first_time, first_32;

	src = event->he_source;

	ASSERT_DEBUG(event->he_size <= sizeof(event->he_entropy),
	    "%s: he_size: %u > sizeof(he_entropy): %zu", __func__,
	    (unsigned)event->he_size, sizeof(event->he_entropy));

	/*
	 * Zero bytes of source entropy doesn't count as observing this source
	 * for the first time.  We still harvest the counter entropy.
	 */
	first_time = event->he_size > 0 &&
	    !FXENT_TEST_SET_ATOMIC_ACQ(src, &fxrng_seen);
	if (__predict_false(first_time)) {
		/*
		 * "The first time [any source] provides entropy, it is used to
		 * directly reseed the root PRNG.  The entropy pools are
		 * bypassed." (§ 3.1)
		 *
		 * Unlike Windows, we cannot rely on loader(8) seed material
		 * being present, so we perform initial keying in the kernel.
		 * We use brng_generation 0 to represent an unkeyed state.
		 *
		 * Prior to initial keying, it doesn't make sense to try to mix
		 * the entropy directly with the root PRNG state, as the root
		 * PRNG is unkeyed.  Instead, we collect pre-keying dynamic
		 * entropy in pool0 and do not bump the root PRNG seed version
		 * or set its key.  Initial keying will incorporate pool0 and
		 * bump the brng_generation (seed version).
		 *
		 * After initial keying, we do directly mix in first-time
		 * entropy sources.  We use the root BRNG to generate 32 bytes
		 * and use fxrng_hash to mix it with the new entropy source and
		 * re-key with the first 256 bits of hash output.
		 */
		FXENT_LOCK();
		FXRNG_BRNG_LOCK(&fxrng_root);
		if (__predict_true(fxrng_root.brng_generation > 0)) {
			/* Bypass the pools: */
			FXENT_UNLOCK();
			fxrng_brng_src_reseed(event);
			FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
			return;
		}

		/*
		 * Keying the root PRNG requires both FXENT_LOCK and the PRNG's
		 * lock, so we only need to hold on to the pool lock to prevent
		 * initial keying without this entropy.
		 */
		FXRNG_BRNG_UNLOCK(&fxrng_root);

		/* Root PRNG hasn't been keyed yet, just accumulate event. */
		fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
		    sizeof(event->he_somecounter));
		fxrng_hash_update(&fxent_pool[0], event->he_entropy,
		    event->he_size);

		if (fxrng_hi_source(src)) {
			/* Prevent overflow. */
			if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
				fxrng_preseed_ent += event->he_size;
		}
		FXENT_UNLOCK();
		return;
	}
	/* !first_time */

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * The first-32-byte tracking data in fxrng_reseed_seen is reset in
	 * fxent_timer_reseed_npools() below.
	 */
	first_32 = event->he_size > 0 &&
	    fxrng_hi_source(src) &&
	    atomic_load_acq_int(&fxent_nactpools) > 1 &&
	    fxrng_hi_pool0_eligible_racy(src);
	if (__predict_false(first_32)) {
		unsigned rem, seen;

		FXENT_LOCK();
		seen = fxrng_reseed_seen[src];
		if (seen == FXENT_HI_SRC_POOL0_BYTES)
			goto round_robin;

		rem = FXENT_HI_SRC_POOL0_BYTES - seen;
		rem = MIN(rem, event->he_size);

		fxrng_reseed_seen[src] = seen + rem;

		/*
		 * We put 'rem' bytes in pool0, and any remaining bytes are
		 * round-robin'd across other pools.
		 */
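		/*
		 * E.g., if a 40-byte event arrives when 'seen' is zero, the
		 * last 32 bytes land in pool0 here, he_size drops to 8, and
		 * the first 8 bytes plus the counter fall through to the
		 * round-robin path below.
		 */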
		fxrng_hash_update(&fxent_pool[0],
		    ((uint8_t *)event->he_entropy) + event->he_size - rem,
		    rem);
		if (rem == event->he_size) {
			fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
			    sizeof(event->he_somecounter));
			FXENT_UNLOCK();
			return;
		}

		/*
		 * If this event provided more bytes than pool0 needed, we only
		 * take the last 'rem' bytes of the entropy buffer for pool0
		 * and leave the timecounter to be round-robin'd with the
		 * remaining entropy.
		 */
		event->he_size -= rem;
		goto round_robin;
	}
	/* !first_32 */

	FXENT_LOCK();

round_robin:
	FXENT_ASSERT();
	pool = event->he_destination % fxent_nactpools;
	fxrng_hash_update(&fxent_pool[pool], event->he_entropy,
	    event->he_size);
	fxrng_hash_update(&fxent_pool[pool], &event->he_somecounter,
	    sizeof(event->he_somecounter));

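	/*
	 * As in the first_time path above, high-quality bytes that arrive
	 * before initial keying still count toward fxrng_preseed_ent, even
	 * when the source has been seen before.
	 */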
	if (__predict_false(fxrng_hi_source(src) &&
	    atomic_load_acq_64(&fxrng_root_generation) == 0)) {
		/* Prevent overflow. */
		if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
			fxrng_preseed_ent += event->he_size;
	}
	FXENT_UNLOCK();
}

/*
 * Top level "seeded" API/signal from randomdev.
 *
 * This is our warning that a request is coming: we need to be seeded.  In
 * fenestrasX, a request for random bytes _never_ fails.  "We (ed: ditto) have
 * observed that there are many callers that never check for the error code,
 * even if they are generating cryptographic key material." (§ 1.6)
 *
 * If we returned 'false', both read_random(9) and chacha20_randomstir()
 * (arc4random(9)) will blindly charge on with something almost certainly worse
 * than what we've got, or are able to get quickly enough.
 */
bool
fxrng_alg_seeded(void)
{
	uint8_t hash[FXRNG_HASH_SZ];
	sbintime_t sbt;

	/* The vast majority of the time, we expect to already be seeded. */
	if (__predict_true(atomic_load_acq_64(&fxrng_root_generation) != 0))
		return (true);

	/*
	 * Take the lock and recheck; only one thread needs to do the initial
	 * seeding work.
	 */
	FXENT_LOCK();
	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		FXENT_UNLOCK();
		return (true);
	}
	/* XXX Any one-off initial seeding goes here. */

	fxrng_hash_finish(&fxent_pool[0], hash, sizeof(hash));
	fxrng_hash_init(&fxent_pool[0]);

	fxrng_brng_reseed(hash, sizeof(hash));
	FXENT_UNLOCK();

	randomdev_unblock();
	explicit_bzero(hash, sizeof(hash));

	/*
	 * This may be called too early for taskqueue_thread to be initialized.
	 * fxent_pool_timer_init will detect if we've already unblocked and
	 * queue the first timer reseed at that point.
	 */
	if (atomic_load_acq_int(&fxent_timer_ready) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	return (true);
}

/*
 * Timer-based reseeds and pool expansion.
 */
static void
fxent_timer_reseed_npools(unsigned n)
{
	/*
	 * 64 * 8 => moderately large 512 bytes.  Could be static, as we are
	 * only used in a static context.  On the other hand, this is in
	 * taskqueue task context and we're likely nearly at top of stack
	 * already.
	 */
	uint8_t hash[FXRNG_HASH_SZ * FXRNG_NPOOLS];
	unsigned i;

	ASSERT_DEBUG(n > 0 && n <= FXRNG_NPOOLS, "n:%u", n);

	FXENT_ASSERT();
	/*
	 * Collect entropy from pools 0..n-1 by concatenating the output hashes
	 * and then feeding them into fxrng_brng_reseed, which will hash the
	 * aggregate together with the current root PRNG keystate to produce a
	 * new key.  It will also bump the global generation counter
	 * appropriately.
	 */
	for (i = 0; i < n; i++) {
		fxrng_hash_finish(&fxent_pool[i], hash + i * FXRNG_HASH_SZ,
		    FXRNG_HASH_SZ);
		fxrng_hash_init(&fxent_pool[i]);
	}

	fxrng_brng_reseed(hash, n * FXRNG_HASH_SZ);
	explicit_bzero(hash, n * FXRNG_HASH_SZ);

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * So here we reset the tracking (somewhat naively given the majority
	 * of sources on most machines are not what we consider "high", but at
	 * 32 bytes it's smaller than a cache line), so the next 32 bytes are
	 * prioritized into pool0.
	 *
	 * See corresponding use of fxrng_reseed_seen in fxrng_event_processor.
	 */
	memset(fxrng_reseed_seen, 0, sizeof(fxrng_reseed_seen));
	FXENT_ASSERT();
}

static void
fxent_timer_reseed(void *ctx __unused, int pending __unused)
{
	static unsigned reseed_intvl_sec = 1;
	/* Counts only reseeds after FXENT_RESEED_INTVL_MAX is reached. */
	static uint64_t reseed_number = 1;

	unsigned next_ival, i, k;
	sbintime_t sbt;

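	/*
	 * The interval grows geometrically: 1, 3, 9, 27, ..., 2187 seconds,
	 * then clamps to FXENT_RESEED_INTVL_MAX (an hour), so the schedule
	 * saturates after roughly the first eight timer reseeds.
	 */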
	if (reseed_intvl_sec < FXENT_RESEED_INTVL_MAX) {
		next_ival = FXENT_RESEED_INTVL_GFACT * reseed_intvl_sec;
		if (next_ival > FXENT_RESEED_INTVL_MAX)
			next_ival = FXENT_RESEED_INTVL_MAX;
		FXENT_LOCK();
		fxent_timer_reseed_npools(1);
		FXENT_UNLOCK();
	} else {
		/*
		 * The creation of entropy pools beyond 0 is enabled when the
		 * reseed interval hits the maximum. (§ 3.3)
		 */
		next_ival = reseed_intvl_sec;

		/*
		 * Pool 0 is used on every reseed; pools 1..0 on every 3rd
		 * reseed; and in general, pools n..0 on every 3^n-th reseed.
		 */
		k = reseed_number;
		reseed_number++;

		/* Count how many pools, from [0, i), to use for reseed. */
		for (i = 1; i < MIN(fxent_nactpools + 1, FXRNG_NPOOLS); i++) {
			if ((k % FXENT_RESEED_BASE) != 0)
				break;
			k /= FXENT_RESEED_BASE;
		}
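		/*
		 * E.g., reseed_number 3 yields i == 2 (pools 0-1), 9 yields
		 * i == 3 (pools 0-2), 27 yields i == 4, and so on, subject to
		 * the fxent_nactpools/FXRNG_NPOOLS clamp above.
		 */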

		/*
		 * If we haven't activated pool i yet, activate it and only
		 * reseed from [0, i-1).  (§ 3.3)
		 */
		FXENT_LOCK();
		if (i == fxent_nactpools + 1) {
			fxent_timer_reseed_npools(fxent_nactpools);
			fxent_nactpools++;
		} else {
			/* Just reseed from [0, i). */
			fxent_timer_reseed_npools(i);
		}
		FXENT_UNLOCK();
	}

	/* Schedule the next reseed. */
	sbt = next_ival * SBT_1S;
	taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer,
	    -sbt, (sbt / 3), C_PREL(2));

	reseed_intvl_sec = next_ival;
}

static void
fxent_pool_timer_init(void *dummy __unused)
{
	sbintime_t sbt;

	TIMEOUT_TASK_INIT(taskqueue_thread, &fxent_reseed_timer, 0,
	    fxent_timer_reseed, NULL);

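	/*
	 * Counterpart of the check in fxrng_alg_seeded(): if initial seeding
	 * has already happened by the time the taskqueue is available, the
	 * first timer reseed is enqueued here instead.
	 */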
	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	atomic_store_rel_int(&fxent_timer_ready, 1);
}
/* After taskqueue_thread is initialized in SI_SUB_TASKQ:SI_ORDER_SECOND. */
SYSINIT(fxent_pool_timer_init, SI_SUB_TASKQ, SI_ORDER_ANY,
    fxent_pool_timer_init, NULL);
626