/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>
#include <machine/smp.h>

#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>

#include <dev/random/fenestrasX/fx_brng.h>
#include <dev/random/fenestrasX/fx_hash.h>
#include <dev/random/fenestrasX/fx_pool.h>
#include <dev/random/fenestrasX/fx_priv.h>
#include <dev/random/fenestrasX/fx_pub.h>

/*
 * Timer-based reseed interval growth factor and limit in seconds. (§ 3.2)
 */
#define	FXENT_RESEED_INTVL_GFACT	3
#define	FXENT_RESEED_INTVL_MAX		3600
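
/*
 * Worked example of the schedule implied by these constants: the timer reseed
 * interval starts at 1 second (see fxent_timer_reseed() below) and is
 * multiplied by the growth factor 3 after each timer reseed -- 1, 3, 9, 27,
 * 81, 243, 729, 2187 -- and is then clamped at the 3600 second maximum.
 */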

/*
 * Pool reseed schedule. Initially, only pool 0 is active. Until the timer
 * interval reaches INTVL_MAX, only pool 0 is used.
 *
 * After reaching INTVL_MAX, pool k is either activated (if inactive) or used
 * (if active) every 3^k timer reseeds. (§ 3.3)
 *
 * (Entropy harvesting only round robins across active pools.)
 */
#define	FXENT_RESEED_BASE		3
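
/*
 * Concretely: once the interval has reached INTVL_MAX, pool 0 participates in
 * every timer reseed, pool 1 in every 3rd, pool 2 in every 9th, and pool k in
 * every 3^k-th, up to FXRNG_NPOOLS - 1.  A pool's first scheduled use
 * activates it (it begins collecting); subsequent scheduled uses drain it
 * into the reseed.
 */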

/*
 * Number of bytes from high quality sources to allocate to pool 0 before
 * normal round-robin allocation after each timer reseed. (§ 3.4)
 */
#define	FXENT_HI_SRC_POOL0_BYTES	32

/*
 * § 3.1
 *
 * Low sources provide unconditioned entropy, such as mouse movements; high
 * sources are assumed to provide high-quality random bytes. Pull sources are
 * those which can be polled, i.e., anything randomdev calls a "random_source."
 *
 * In the whitepaper, low sources are pull. For us, at least in the existing
 * design, low-quality sources push into some global ring buffer and then get
 * forwarded into the RNG by a thread that continually polls. Presumably their
 * design batches low entropy signals in some way (SHA512?) and only requests
 * them dynamically on reseed. I'm not sure what the benefit is vs. feeding
 * into the pools directly.
 */
enum fxrng_ent_access_cls {
	FXRNG_PUSH,
	FXRNG_PULL,
};
enum fxrng_ent_source_cls {
	FXRNG_HI,
	FXRNG_LO,
	FXRNG_GARBAGE,
};
struct fxrng_ent_cls {
	enum fxrng_ent_access_cls	entc_axx_cls;
	enum fxrng_ent_source_cls	entc_src_cls;
};

static const struct fxrng_ent_cls fxrng_hi_pull = {
	.entc_axx_cls = FXRNG_PULL,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_hi_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_lo_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_LO,
};
static const struct fxrng_ent_cls fxrng_garbage = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_GARBAGE,
};

/*
 * This table is a mapping of randomdev's current source abstractions to the
 * designations above; at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 */
static const struct fxrng_ent_char {
	const struct fxrng_ent_cls	*entc_cls;
} fxrng_ent_char[ENTROPYSOURCE] = {
	[RANDOM_CACHED] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_ATTACH] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_KEYBOARD] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_MOUSE] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_TUN] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_ETHER] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_NG] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_INTERRUPT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_SWI] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_FS_ATIME] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_UMA] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_CALLOUT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_RANDOMDEV] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_PURE_SAFE] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_GLXSB] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_HIFN] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_RDRAND] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RDSEED] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_NEHEMIAH] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RNDTEST] = {
		.entc_cls = &fxrng_garbage,
	},
	[RANDOM_PURE_VIRTIO] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_BROADCOM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_CCP] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_DARN] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_TPM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_VMGENID] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_QUALCOMM] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_ARMV8] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_ARM_TRNG] = {
		.entc_cls = &fxrng_hi_pull,
	},
};

/* Useful for single-bit-per-source state. */
BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE);

/* XXX Borrowed from not-yet-committed D22702. */
#ifndef BIT_TEST_SET_ATOMIC_ACQ
#define	BIT_TEST_SET_ATOMIC_ACQ(_s, n, p)	\
	(atomic_testandset_acq_long(		\
	    &(p)->__bits[__bitset_word((_s), (n))], (n)) != 0)
#endif
#define	FXENT_TEST_SET_ATOMIC_ACQ(n, p)	\
	BIT_TEST_SET_ATOMIC_ACQ(ENTROPYSOURCE, n, p)
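
/*
 * FXENT_TEST_SET_ATOMIC_ACQ(src, p) atomically sets the bit for entropy
 * source 'src' in the bitset 'p' and evaluates to true if that bit was
 * already set, with acquire memory ordering.  It is used below to detect the
 * first event ever received from a given source.
 */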

/* For special behavior on first-time entropy sources. (§ 3.1) */
static struct fxrng_bits __read_mostly fxrng_seen;

/* For special behavior for high-entropy sources after a reseed. (§ 3.4) */
_Static_assert(FXENT_HI_SRC_POOL0_BYTES <= UINT8_MAX, "");
static uint8_t __read_mostly fxrng_reseed_seen[ENTROPYSOURCE];

/* Entropy pools. Lock order is ENT -> RNG(root) -> RNG(leaf). */
static struct mtx fxent_pool_lk;
MTX_SYSINIT(fx_pool, &fxent_pool_lk, "fx entropy pool lock", MTX_DEF);
#define	FXENT_LOCK()		mtx_lock(&fxent_pool_lk)
#define	FXENT_UNLOCK()		mtx_unlock(&fxent_pool_lk)
#define	FXENT_ASSERT(rng)	mtx_assert(&fxent_pool_lk, MA_OWNED)
#define	FXENT_ASSERT_NOT(rng)	mtx_assert(&fxent_pool_lk, MA_NOTOWNED)
static struct fxrng_hash fxent_pool[FXRNG_NPOOLS];
static unsigned __read_mostly fxent_nactpools = 1;
static struct timeout_task fxent_reseed_timer;
static int __read_mostly fxent_timer_ready;

/*
 * Track number of bytes of entropy harvested from high-quality sources prior
 * to initial keying. The idea is to collect more jitter entropy when fewer
 * high-quality bytes were available and less if we had other good sources. We
 * want to provide always-on availability but don't necessarily have *any*
 * great sources on some platforms.
 *
 * Like fxrng_ent_char: at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 *
 * Jitter entropy is unimplemented for now.
 */
static unsigned long fxrng_preseed_ent;

void
fxrng_pools_init(void)
{
	size_t i;

	for (i = 0; i < nitems(fxent_pool); i++)
		fxrng_hash_init(&fxent_pool[i]);
}

static inline bool
fxrng_hi_source(enum random_entropy_source src)
{
	return (fxrng_ent_char[src].entc_cls->entc_src_cls == FXRNG_HI);
}

/*
 * A racy check that this high-entropy source's event should contribute to
 * pool0 on the basis of per-source byte count. The check is racy for two
 * reasons:
 * - Performance: The vast majority of the time, we've already taken 32 bytes
 *   from any present high quality source and the racy check lets us avoid
 *   dirtying the cache for the global array.
 * - Correctness: It's fine that the check is racy. The failure modes are:
 *   • False positive: We will detect when we take the lock.
 *   • False negative: We still collect the entropy; it just won't be
 *     preferentially placed in pool0 in this case.
 */
static inline bool
fxrng_hi_pool0_eligible_racy(enum random_entropy_source src)
{
	return (atomic_load_acq_8(&fxrng_reseed_seen[src]) <
	    FXENT_HI_SRC_POOL0_BYTES);
}

/*
 * Top level entropy processing API from randomdev.
 *
 * Invoked by the core randomdev subsystem for preload entropy, for "push"
 * sources (interrupts, keyboard, etc.), and for "pull" sources (RDRAND, etc.).
 */
void
fxrng_event_processor(struct harvest_event *event)
{
	enum random_entropy_source src;
	unsigned pool;
	bool first_time, first_32;

	src = event->he_source;

	ASSERT_DEBUG(event->he_size <= sizeof(event->he_entropy),
	    "%s: he_size: %u > sizeof(he_entropy): %zu", __func__,
	    (unsigned)event->he_size, sizeof(event->he_entropy));

	/*
	 * Zero bytes of source entropy doesn't count as observing this source
	 * for the first time. We still harvest the counter entropy.
	 */
	first_time = event->he_size > 0 &&
	    !FXENT_TEST_SET_ATOMIC_ACQ(src, &fxrng_seen);
	if (__predict_false(first_time)) {
		/*
		 * "The first time [any source] provides entropy, it is used to
		 * directly reseed the root PRNG. The entropy pools are
		 * bypassed." (§ 3.1)
		 *
		 * Unlike Windows, we cannot rely on loader(8) seed material
		 * being present, so we perform initial keying in the kernel.
		 * We use brng_generation 0 to represent an unkeyed state.
		 *
		 * Prior to initial keying, it doesn't make sense to try to mix
		 * the entropy directly with the root PRNG state, as the root
		 * PRNG is unkeyed. Instead, we collect pre-keying dynamic
		 * entropy in pool0 and do not bump the root PRNG seed version
		 * or set its key. Initial keying will incorporate pool0 and
		 * bump the brng_generation (seed version).
		 *
		 * After initial keying, we do directly mix in first-time
		 * entropy sources. We use the root BRNG to generate 32 bytes
		 * and use fxrng_hash to mix it with the new entropy source and
		 * re-key with the first 256 bits of hash output.
		 */
		FXENT_LOCK();
		FXRNG_BRNG_LOCK(&fxrng_root);
		if (__predict_true(fxrng_root.brng_generation > 0)) {
			/* Bypass the pools: */
			FXENT_UNLOCK();
			fxrng_brng_src_reseed(event);
			FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
			return;
		}

		/*
		 * Keying the root PRNG requires both FXENT_LOCK and the PRNG's
		 * lock, so we only need to hold on to the pool lock to prevent
		 * initial keying without this entropy.
		 */
		FXRNG_BRNG_UNLOCK(&fxrng_root);

		/* Root PRNG hasn't been keyed yet, just accumulate event. */
		fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
		    sizeof(event->he_somecounter));
		fxrng_hash_update(&fxent_pool[0], event->he_entropy,
		    event->he_size);

		if (fxrng_hi_source(src)) {
			/* Prevent overflow. */
			if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
				fxrng_preseed_ent += event->he_size;
		}
		FXENT_UNLOCK();
		return;
	}
	/* !first_time */

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * The first-32-byte tracking data in fxrng_reseed_seen is reset in
	 * fxent_timer_reseed_npools() below.
	 */
	first_32 = event->he_size > 0 &&
	    fxrng_hi_source(src) &&
	    atomic_load_acq_int(&fxent_nactpools) > 1 &&
	    fxrng_hi_pool0_eligible_racy(src);
	if (__predict_false(first_32)) {
		unsigned rem, seen;

		FXENT_LOCK();
		seen = fxrng_reseed_seen[src];
		if (seen == FXENT_HI_SRC_POOL0_BYTES)
			goto round_robin;

		rem = FXENT_HI_SRC_POOL0_BYTES - seen;
		rem = MIN(rem, event->he_size);

		fxrng_reseed_seen[src] = seen + rem;

		/*
		 * We put 'rem' bytes in pool0, and any remaining bytes are
		 * round-robin'd across other pools.
		 */
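		/*
		 * Worked example: if 20 of the 32 post-reseed bytes have
		 * already been seen from this source and this event carries
		 * 24 bytes, rem = 12.  The last 12 bytes go to pool 0 below;
		 * the first 12 bytes plus the timecounter fall through to the
		 * round-robin path.
		 */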
		fxrng_hash_update(&fxent_pool[0],
		    ((uint8_t *)event->he_entropy) + event->he_size - rem,
		    rem);
		if (rem == event->he_size) {
			fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
			    sizeof(event->he_somecounter));
			FXENT_UNLOCK();
			return;
		}

		/*
		 * If fewer bytes were needed than this event provided, we only
		 * take the last rem bytes of the entropy buffer and leave the
		 * timecounter to be round-robin'd with the remaining entropy.
		 */
		event->he_size -= rem;
		goto round_robin;
	}
	/* !first_32 */

	FXENT_LOCK();

round_robin:
	FXENT_ASSERT();
	pool = event->he_destination % fxent_nactpools;
	fxrng_hash_update(&fxent_pool[pool], event->he_entropy,
	    event->he_size);
	fxrng_hash_update(&fxent_pool[pool], &event->he_somecounter,
	    sizeof(event->he_somecounter));

	if (__predict_false(fxrng_hi_source(src) &&
	    atomic_load_acq_64(&fxrng_root_generation) == 0)) {
		/* Prevent overflow. */
		if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
			fxrng_preseed_ent += event->he_size;
	}
	FXENT_UNLOCK();
}

/*
 * Top level "seeded" API/signal from randomdev.
 *
 * This is our warning that a request is coming: we need to be seeded. In
 * fenestrasX, a request for random bytes _never_ fails. "We (ed: ditto) have
 * observed that there are many callers that never check for the error code,
 * even if they are generating cryptographic key material." (§ 1.6)
 *
 * If we returned 'false', both read_random(9) and chacha20_randomstir()
 * (arc4random(9)) will blindly charge on with something almost certainly worse
 * than what we've got, or are able to get quickly enough.
 */
bool
fxrng_alg_seeded(void)
{
	uint8_t hash[FXRNG_HASH_SZ];
	sbintime_t sbt;

	/* The vast majority of the time, we expect to already be seeded. */
	if (__predict_true(atomic_load_acq_64(&fxrng_root_generation) != 0))
		return (true);

	/*
	 * Take the lock and recheck; only one thread needs to do the initial
	 * seeding work.
	 */
	FXENT_LOCK();
	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		FXENT_UNLOCK();
		return (true);
	}
	/* XXX Any one-off initial seeding goes here. */

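	/*
	 * Initial keying: drain whatever has accumulated in pool 0 (the only
	 * pool fed prior to initial keying) into the root BRNG and reset the
	 * pool for future accumulation.
	 */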
	fxrng_hash_finish(&fxent_pool[0], hash, sizeof(hash));
	fxrng_hash_init(&fxent_pool[0]);

	fxrng_brng_reseed(hash, sizeof(hash));
	FXENT_UNLOCK();

	randomdev_unblock();
	explicit_bzero(hash, sizeof(hash));

	/*
	 * This may be called too early for taskqueue_thread to be initialized.
	 * fxent_pool_timer_init will detect if we've already unblocked and
	 * queue the first timer reseed at that point.
	 */
	if (atomic_load_acq_int(&fxent_timer_ready) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	return (true);
}

/*
 * Timer-based reseeds and pool expansion.
 */
static void
fxent_timer_reseed_npools(unsigned n)
{
	/*
	 * 64 * 8 => moderately large 512 bytes. Could be static, as we are
	 * only used in a static context. On the other hand, this is in
	 * taskqueue TASK context and we're likely nearly at top of stack
	 * already.
	 */
	uint8_t hash[FXRNG_HASH_SZ * FXRNG_NPOOLS];
	unsigned i;

	ASSERT_DEBUG(n > 0 && n <= FXRNG_NPOOLS, "n:%u", n);

	FXENT_ASSERT();
	/*
	 * Collect entropy from pools 0..n-1 by concatenating the output hashes
	 * and then feeding them into fxrng_brng_reseed, which will hash the
	 * aggregate together with the current root PRNG keystate to produce a
	 * new key. It will also bump the global generation counter
	 * appropriately.
	 */
	for (i = 0; i < n; i++) {
		fxrng_hash_finish(&fxent_pool[i], hash + i * FXRNG_HASH_SZ,
		    FXRNG_HASH_SZ);
		fxrng_hash_init(&fxent_pool[i]);
	}

	fxrng_brng_reseed(hash, n * FXRNG_HASH_SZ);
	explicit_bzero(hash, n * FXRNG_HASH_SZ);

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * So here we reset the tracking (somewhat naively given the majority
	 * of sources on most machines are not what we consider "high", but at
	 * 32 bytes it's smaller than a cache line), so the next 32 bytes are
	 * prioritized into pool0.
	 *
	 * See corresponding use of fxrng_reseed_seen in fxrng_event_processor.
	 */
	memset(fxrng_reseed_seen, 0, sizeof(fxrng_reseed_seen));
	FXENT_ASSERT();
}

static void
fxent_timer_reseed(void *ctx __unused, int pending __unused)
{
	static unsigned reseed_intvl_sec = 1;
	/* Counts only reseeds that occur after FXENT_RESEED_INTVL_MAX is reached. */
	static uint64_t reseed_number = 1;

	unsigned next_ival, i, k;
	sbintime_t sbt;

	if (reseed_intvl_sec < FXENT_RESEED_INTVL_MAX) {
		next_ival = FXENT_RESEED_INTVL_GFACT * reseed_intvl_sec;
		if (next_ival > FXENT_RESEED_INTVL_MAX)
			next_ival = FXENT_RESEED_INTVL_MAX;
		FXENT_LOCK();
		fxent_timer_reseed_npools(1);
		FXENT_UNLOCK();
	} else {
		/*
		 * The creation of entropy pools beyond 0 is enabled when the
		 * reseed interval hits the maximum. (§ 3.3)
		 */
		next_ival = reseed_intvl_sec;

		/*
		 * Pool 0 is used on every reseed; pools 0..1 on every 3rd
		 * reseed; and in general, pools 0..n on every 3^n-th reseed.
		 */
		k = reseed_number;
		reseed_number++;

		/* Count how many pools, from [0, i), to use for reseed. */
		for (i = 1; i < MIN(fxent_nactpools + 1, FXRNG_NPOOLS); i++) {
			if ((k % FXENT_RESEED_BASE) != 0)
				break;
			k /= FXENT_RESEED_BASE;
		}
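
		/*
		 * Worked example: for reseed_number 9 (assuming at least two
		 * pools are already active), k=9 divides by 3, k=3 divides by
		 * 3, and the loop exits with i == 3.  Either pools 0..2 are
		 * drained for this reseed, or, if pool 2 was not yet active,
		 * pools 0..1 are drained and pool 2 is activated below.
		 */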

		/*
		 * If we haven't activated pool i yet, activate it and only
		 * reseed from [0, i-1). (§ 3.3)
		 */
		FXENT_LOCK();
		if (i == fxent_nactpools + 1) {
			fxent_timer_reseed_npools(fxent_nactpools);
			fxent_nactpools++;
		} else {
			/* Just reseed from [0, i). */
			fxent_timer_reseed_npools(i);
		}
		FXENT_UNLOCK();
	}

	/* Schedule the next reseed. */
	sbt = next_ival * SBT_1S;
	taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer,
	    -sbt, (sbt / 3), C_PREL(2));

	reseed_intvl_sec = next_ival;
}

static void
fxent_pool_timer_init(void *dummy __unused)
{
	sbintime_t sbt;

	TIMEOUT_TASK_INIT(taskqueue_thread, &fxent_reseed_timer, 0,
	    fxent_timer_reseed, NULL);

	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	atomic_store_rel_int(&fxent_timer_ready, 1);
}
/* After taskqueue_thread is initialized in SI_SUB_TASKQ:SI_ORDER_SECOND. */
SYSINIT(fxent_pool_timer_init, SI_SUB_TASKQ, SI_ORDER_ANY,
    fxent_pool_timer_init, NULL);