xref: /freebsd/sys/dev/random/fenestrasX/fx_pool.c (revision edf8578117e8844e02c0121147f45e4609b30680)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>
#include <machine/smp.h>

#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>

#include <dev/random/fenestrasX/fx_brng.h>
#include <dev/random/fenestrasX/fx_hash.h>
#include <dev/random/fenestrasX/fx_pool.h>
#include <dev/random/fenestrasX/fx_priv.h>
#include <dev/random/fenestrasX/fx_pub.h>

/*
 * Timer-based reseed interval growth factor and limit in seconds. (§ 3.2)
 */
#define	FXENT_RESEED_INTVL_GFACT	3
#define	FXENT_RESEED_INTVL_MAX		3600
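/*
 * With the initial 1 second interval set in fxent_timer_reseed(), the timer
 * period grows 1, 3, 9, ..., 2187 seconds and is then clamped to the one
 * hour maximum.
 */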

/*
 * Pool reseed schedule.  Initially, only pool 0 is active.  Until the timer
 * interval reaches INTVL_MAX, only pool 0 is used.
 *
 * After reaching INTVL_MAX, pool k is either activated (if inactive) or used
 * (if active) every 3^k timer reseeds.  (§ 3.3)
 *
 * (Entropy harvesting only round-robins across active pools.)
 */
#define	FXENT_RESEED_BASE		3
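/*
 * For example, once the interval is capped: timer reseed #3 activates pool 1,
 * #6 draws from pools 0-1, #9 activates pool 2, #27 activates pool 3, and so
 * on.
 */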

/*
 * Number of bytes from high quality sources to allocate to pool 0 before
 * normal round-robin allocation after each timer reseed. (§ 3.4)
 */
#define	FXENT_HI_SRC_POOL0_BYTES	32
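/* Per-source progress toward this limit is kept in fxrng_reseed_seen[]. */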

/*
 * § 3.1
 *
 * Low sources provide unconditioned entropy, such as mouse movements; high
 * sources are assumed to provide high-quality random bytes.  Pull sources are
 * those which can be polled, i.e., anything randomdev calls a "random_source."
 *
 * In the whitepaper, low sources are pull.  For us, at least in the existing
 * design, low-quality sources push into some global ring buffer and then get
 * forwarded into the RNG by a thread that continually polls.  Presumably their
 * design batches low entropy signals in some way (SHA512?) and only requests
 * them dynamically on reseed.  I'm not sure what the benefit is vs feeding
 * into the pools directly.
 */
enum fxrng_ent_access_cls {
	FXRNG_PUSH,
	FXRNG_PULL,
};
enum fxrng_ent_source_cls {
	FXRNG_HI,
	FXRNG_LO,
	FXRNG_GARBAGE,
};
struct fxrng_ent_cls {
	enum fxrng_ent_access_cls	entc_axx_cls;
	enum fxrng_ent_source_cls	entc_src_cls;
};

static const struct fxrng_ent_cls fxrng_hi_pull = {
	.entc_axx_cls = FXRNG_PULL,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_hi_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_lo_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_LO,
};
static const struct fxrng_ent_cls fxrng_garbage = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_GARBAGE,
};

/*
 * This table is a mapping of randomdev's current source abstractions to the
 * designations above; at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 */
static const struct fxrng_ent_char {
	const struct fxrng_ent_cls	*entc_cls;
} fxrng_ent_char[ENTROPYSOURCE] = {
	[RANDOM_CACHED] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_ATTACH] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_KEYBOARD] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_MOUSE] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_TUN] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_ETHER] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_NG] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_INTERRUPT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_SWI] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_FS_ATIME] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_UMA] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_CALLOUT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_PURE_OCTEON] = {
		.entc_cls = &fxrng_hi_push,	/* Could be made pull. */
	},
	[RANDOM_PURE_SAFE] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_GLXSB] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_HIFN] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_RDRAND] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_NEHEMIAH] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RNDTEST] = {
		.entc_cls = &fxrng_garbage,
	},
	[RANDOM_PURE_VIRTIO] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_BROADCOM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_CCP] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_DARN] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_TPM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_VMGENID] = {
		.entc_cls = &fxrng_hi_push,
	},
};

/* Useful for single-bit-per-source state. */
BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE);

/* XXX Borrowed from not-yet-committed D22702. */
#ifndef BIT_TEST_SET_ATOMIC_ACQ
#define	BIT_TEST_SET_ATOMIC_ACQ(_s, n, p)	\
	(atomic_testandset_acq_long(		\
	    &(p)->__bits[__bitset_word((_s), (n))], (n)) != 0)
#endif
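/*
 * Atomically set the bit for source 'n' in bitset 'p' and return whether it
 * was already set (with acquire semantics).
 */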
#define	FXENT_TEST_SET_ATOMIC_ACQ(n, p) \
	BIT_TEST_SET_ATOMIC_ACQ(ENTROPYSOURCE, n, p)

/* For special behavior on first-time entropy sources. (§ 3.1) */
static struct fxrng_bits __read_mostly fxrng_seen;

/* For special behavior for high-entropy sources after a reseed. (§ 3.4) */
_Static_assert(FXENT_HI_SRC_POOL0_BYTES <= UINT8_MAX, "");
static uint8_t __read_mostly fxrng_reseed_seen[ENTROPYSOURCE];

/* Entropy pools.  Lock order is ENT -> RNG(root) -> RNG(leaf). */
static struct mtx fxent_pool_lk;
MTX_SYSINIT(fx_pool, &fxent_pool_lk, "fx entropy pool lock", MTX_DEF);
#define	FXENT_LOCK()		mtx_lock(&fxent_pool_lk)
#define	FXENT_UNLOCK()		mtx_unlock(&fxent_pool_lk)
#define	FXENT_ASSERT(rng)	mtx_assert(&fxent_pool_lk, MA_OWNED)
#define	FXENT_ASSERT_NOT(rng)	mtx_assert(&fxent_pool_lk, MA_NOTOWNED)
static struct fxrng_hash fxent_pool[FXRNG_NPOOLS];
static unsigned __read_mostly fxent_nactpools = 1;
static struct timeout_task fxent_reseed_timer;
static int __read_mostly fxent_timer_ready;
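/*
 * Pools [0, fxent_nactpools) are active; fxent_timer_reseed() activates
 * more over time.  fxent_timer_ready flags that taskqueue_thread is up and
 * the reseed timeout task may be scheduled.
 */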

/*
 * Track number of bytes of entropy harvested from high-quality sources prior
 * to initial keying.  The idea is to collect more jitter entropy when fewer
 * high-quality bytes were available and less if we had other good sources.  We
 * want to provide always-on availability but don't necessarily have *any*
 * great sources on some platforms.
 *
 * Like fxrng_ent_char: at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 *
 * Jitter entropy is unimplemented for now.
 */
static unsigned long fxrng_preseed_ent;

void
fxrng_pools_init(void)
{
	size_t i;

	for (i = 0; i < nitems(fxent_pool); i++)
		fxrng_hash_init(&fxent_pool[i]);
}

static inline bool
fxrng_hi_source(enum random_entropy_source src)
{
	return (fxrng_ent_char[src].entc_cls->entc_src_cls == FXRNG_HI);
}

/*
 * A racy check that this high-entropy source's event should contribute to
 * pool0 on the basis of per-source byte count.  The check is racy for two
 * reasons:
 *   - Performance: The vast majority of the time, we've already taken 32 bytes
 *     from any present high quality source and the racy check lets us avoid
 *     dirtying the cache for the global array.
 *   - Correctness: It's fine that the check is racy.  The failure modes are:
 *     • False positive: We will detect when we take the lock.
 *     • False negative: We still collect the entropy; it just won't be
 *       preferentially placed in pool0 in this case.
 */
static inline bool
fxrng_hi_pool0_eligible_racy(enum random_entropy_source src)
{
	return (atomic_load_acq_8(&fxrng_reseed_seen[src]) <
	    FXENT_HI_SRC_POOL0_BYTES);
}

/*
 * Top level entropy processing API from randomdev.
 *
 * Invoked by the core randomdev subsystem for preload entropy, "push" sources
 * (like interrupts, keyboard, etc.) and "pull" sources (RDRAND, etc.).
 */
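/* Registered as the fenestrasX algorithm's event processor in fx_main.c. */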
void
fxrng_event_processor(struct harvest_event *event)
{
	enum random_entropy_source src;
	unsigned pool;
	bool first_time, first_32;

	src = event->he_source;

	ASSERT_DEBUG(event->he_size <= sizeof(event->he_entropy),
	    "%s: he_size: %u > sizeof(he_entropy): %zu", __func__,
	    (unsigned)event->he_size, sizeof(event->he_entropy));

	/*
	 * Zero bytes of source entropy don't count as observing this source
	 * for the first time.  We still harvest the counter entropy.
	 */
	first_time = event->he_size > 0 &&
	    !FXENT_TEST_SET_ATOMIC_ACQ(src, &fxrng_seen);
	if (__predict_false(first_time)) {
		/*
		 * "The first time [any source] provides entropy, it is used to
		 * directly reseed the root PRNG.  The entropy pools are
		 * bypassed." (§ 3.1)
		 *
		 * Unlike Windows, we cannot rely on loader(8) seed material
		 * being present, so we perform initial keying in the kernel.
		 * We use brng_generation 0 to represent an unkeyed state.
		 *
		 * Prior to initial keying, it doesn't make sense to try to mix
		 * the entropy directly with the root PRNG state, as the root
		 * PRNG is unkeyed.  Instead, we collect pre-keying dynamic
		 * entropy in pool0 and do not bump the root PRNG seed version
		 * or set its key.  Initial keying will incorporate pool0 and
		 * bump the brng_generation (seed version).
		 *
		 * After initial keying, we do directly mix in first-time
		 * entropy sources.  We use the root BRNG to generate 32 bytes
		 * and use fxrng_hash to mix it with the new entropy source and
		 * re-key with the first 256 bits of hash output.
		 */
		FXENT_LOCK();
		FXRNG_BRNG_LOCK(&fxrng_root);
		if (__predict_true(fxrng_root.brng_generation > 0)) {
			/* Bypass the pools: */
			FXENT_UNLOCK();
			fxrng_brng_src_reseed(event);
			FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
			return;
		}

		/*
		 * Keying the root PRNG requires both FXENT_LOCK and the PRNG's
		 * lock, so we only need to hold on to the pool lock to prevent
		 * initial keying without this entropy.
		 */
		FXRNG_BRNG_UNLOCK(&fxrng_root);

		/* Root PRNG hasn't been keyed yet, just accumulate event. */
		fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
		    sizeof(event->he_somecounter));
		fxrng_hash_update(&fxent_pool[0], event->he_entropy,
		    event->he_size);

		if (fxrng_hi_source(src)) {
			/* Prevent overflow. */
			if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
				fxrng_preseed_ent += event->he_size;
		}
		FXENT_UNLOCK();
		return;
	}
	/* !first_time */

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * The first-32-byte tracking data in fxrng_reseed_seen is reset in
	 * fxent_timer_reseed_npools() below.
	 */
	first_32 = event->he_size > 0 &&
	    fxrng_hi_source(src) &&
	    atomic_load_acq_int(&fxent_nactpools) > 1 &&
	    fxrng_hi_pool0_eligible_racy(src);
	if (__predict_false(first_32)) {
		unsigned rem, seen;

		FXENT_LOCK();
		seen = fxrng_reseed_seen[src];
		if (seen == FXENT_HI_SRC_POOL0_BYTES)
			goto round_robin;

		rem = FXENT_HI_SRC_POOL0_BYTES - seen;
		rem = MIN(rem, event->he_size);

		fxrng_reseed_seen[src] = seen + rem;

		/*
		 * We put 'rem' bytes in pool0, and any remaining bytes are
		 * round-robin'd across other pools.
		 */
		fxrng_hash_update(&fxent_pool[0],
		    ((uint8_t *)event->he_entropy) + event->he_size - rem,
		    rem);
		if (rem == event->he_size) {
			fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
			    sizeof(event->he_somecounter));
			FXENT_UNLOCK();
			return;
		}

		/*
		 * If fewer bytes were needed than this event provided, we only
		 * take the last rem bytes of the entropy buffer and leave the
		 * timecounter to be round-robin'd with the remaining entropy.
		 */
		event->he_size -= rem;
		goto round_robin;
	}
	/* !first_32 */

	FXENT_LOCK();

round_robin:
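	/*
	 * Both paths reach this label with the pool lock held; the first_32
	 * path above jumps here via goto after taking it.
	 */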
	FXENT_ASSERT();
	pool = event->he_destination % fxent_nactpools;
	fxrng_hash_update(&fxent_pool[pool], event->he_entropy,
	    event->he_size);
	fxrng_hash_update(&fxent_pool[pool], &event->he_somecounter,
	    sizeof(event->he_somecounter));

	if (__predict_false(fxrng_hi_source(src) &&
	    atomic_load_acq_64(&fxrng_root_generation) == 0)) {
		/* Prevent overflow. */
		if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
			fxrng_preseed_ent += event->he_size;
	}
	FXENT_UNLOCK();
}
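
/*
 * Illustrative sketch (not part of this file): a typical push source hands
 * bytes to the harvest queue, e.g.
 *
 *	random_harvest_queue(&sample, sizeof(sample), RANDOM_MOUSE);
 *
 * The harvest code records the source, size, destination, and a cycle
 * counter in a struct harvest_event, and the harvest kthread later hands
 * that event to fxrng_event_processor() above via the algorithm's event
 * processor hook.
 */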

/*
 * Top level "seeded" API/signal from randomdev.
 *
 * This is our warning that a request is coming: we need to be seeded.  In
 * fenestrasX, a request for random bytes _never_ fails.  "We (ed: ditto) have
 * observed that there are many callers that never check for the error code,
 * even if they are generating cryptographic key material." (§ 1.6)
 *
 * If we returned 'false', both read_random(9) and chacha20_randomstir()
 * (arc4random(9)) would blindly charge on with something almost certainly
 * worse than what we've got, or are able to get quickly enough.
 */
bool
fxrng_alg_seeded(void)
{
	uint8_t hash[FXRNG_HASH_SZ];
	sbintime_t sbt;

	/* The vast majority of the time, we expect to already be seeded. */
	if (__predict_true(atomic_load_acq_64(&fxrng_root_generation) != 0))
		return (true);

	/*
	 * Take the lock and recheck; only one thread needs to do the initial
	 * seeding work.
	 */
	FXENT_LOCK();
	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		FXENT_UNLOCK();
		return (true);
	}
	/* XXX Any one-off initial seeding goes here. */

	fxrng_hash_finish(&fxent_pool[0], hash, sizeof(hash));
	fxrng_hash_init(&fxent_pool[0]);

	fxrng_brng_reseed(hash, sizeof(hash));
	FXENT_UNLOCK();

	randomdev_unblock();
	explicit_bzero(hash, sizeof(hash));

	/*
	 * This may be called too early for taskqueue_thread to be initialized.
	 * fxent_pool_timer_init will detect if we've already unblocked and
	 * queue the first timer reseed at that point.
	 */
	if (atomic_load_acq_int(&fxent_timer_ready) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	return (true);
}

/*
 * Timer-based reseeds and pool expansion.
 */
static void
fxent_timer_reseed_npools(unsigned n)
{
	/*
	 * 64 * 8 => moderately large 512 bytes.  Could be static, as we are
	 * only used in a static context.  On the other hand, this is in
	 * taskqueue TASK context and we're likely nearly at top of stack
	 * already.
	 */
	uint8_t hash[FXRNG_HASH_SZ * FXRNG_NPOOLS];
	unsigned i;

	ASSERT_DEBUG(n > 0 && n <= FXRNG_NPOOLS, "n:%u", n);

	FXENT_ASSERT();
	/*
	 * Collect entropy from pools 0..n-1 by concatenating the output hashes
	 * and then feeding them into fxrng_brng_reseed, which will hash the
	 * aggregate together with the current root PRNG keystate to produce a
	 * new key.  It will also bump the global generation counter
	 * appropriately.
	 */
	for (i = 0; i < n; i++) {
		fxrng_hash_finish(&fxent_pool[i], hash + i * FXRNG_HASH_SZ,
		    FXRNG_HASH_SZ);
		fxrng_hash_init(&fxent_pool[i]);
	}

	fxrng_brng_reseed(hash, n * FXRNG_HASH_SZ);
	explicit_bzero(hash, n * FXRNG_HASH_SZ);

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * So here we reset the tracking (somewhat naively given the majority
	 * of sources on most machines are not what we consider "high", but at
	 * 32 bytes it's smaller than a cache line), so the next 32 bytes are
	 * prioritized into pool0.
	 *
	 * See corresponding use of fxrng_reseed_seen in fxrng_event_processor.
	 */
	memset(fxrng_reseed_seen, 0, sizeof(fxrng_reseed_seen));
	FXENT_ASSERT();
}

static void
fxent_timer_reseed(void *ctx __unused, int pending __unused)
{
	static unsigned reseed_intvl_sec = 1;
	/* Only counts reseeds after FXENT_RESEED_INTVL_MAX is reached. */
	static uint64_t reseed_number = 1;

	unsigned next_ival, i, k;
	sbintime_t sbt;

	if (reseed_intvl_sec < FXENT_RESEED_INTVL_MAX) {
		next_ival = FXENT_RESEED_INTVL_GFACT * reseed_intvl_sec;
		if (next_ival > FXENT_RESEED_INTVL_MAX)
			next_ival = FXENT_RESEED_INTVL_MAX;
		FXENT_LOCK();
		fxent_timer_reseed_npools(1);
		FXENT_UNLOCK();
	} else {
		/*
		 * The creation of entropy pools beyond 0 is enabled when the
		 * reseed interval hits the maximum. (§ 3.3)
		 */
		next_ival = reseed_intvl_sec;

		/*
		 * Pool 0 is used on every reseed; pools 1..0 on every 3rd
		 * reseed; and in general, pools n..0 on every 3^n reseeds.
		 */
		k = reseed_number;
		reseed_number++;

		/* Count how many pools, from [0, i), to use for reseed. */
		for (i = 1; i < MIN(fxent_nactpools + 1, FXRNG_NPOOLS); i++) {
			if ((k % FXENT_RESEED_BASE) != 0)
				break;
			k /= FXENT_RESEED_BASE;
		}

		/*
		 * If we haven't activated pool i yet, activate it and only
		 * reseed from [0, i-1).  (§ 3.3)
		 */
		FXENT_LOCK();
		if (i == fxent_nactpools + 1) {
			fxent_timer_reseed_npools(fxent_nactpools);
			fxent_nactpools++;
		} else {
			/* Just reseed from [0, i). */
			fxent_timer_reseed_npools(i);
		}
		FXENT_UNLOCK();
	}

	/* Schedule the next reseed. */
	sbt = next_ival * SBT_1S;
	taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer,
	    -sbt, (sbt / 3), C_PREL(2));

	reseed_intvl_sec = next_ival;
}

static void
fxent_pool_timer_init(void *dummy __unused)
{
	sbintime_t sbt;

	TIMEOUT_TASK_INIT(taskqueue_thread, &fxent_reseed_timer, 0,
	    fxent_timer_reseed, NULL);

	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	atomic_store_rel_int(&fxent_timer_ready, 1);
}
/* After taskqueue_thread is initialized in SI_SUB_TASKQ:SI_ORDER_SECOND. */
SYSINIT(fxent_pool_timer_init, SI_SUB_TASKQ, SI_ORDER_ANY,
    fxent_pool_timer_init, NULL);