/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>
#include <machine/smp.h>

#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>

#include <dev/random/fenestrasX/fx_brng.h>
#include <dev/random/fenestrasX/fx_hash.h>
#include <dev/random/fenestrasX/fx_pool.h>
#include <dev/random/fenestrasX/fx_priv.h>
#include <dev/random/fenestrasX/fx_pub.h>

/*
 * Timer-based reseed interval growth factor and limit in seconds. (§ 3.2)
 */
#define	FXENT_RESEED_INTVL_GFACT	3
#define	FXENT_RESEED_INTVL_MAX		3600
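
/*
 * A worked example of the growth schedule (see fxent_timer_reseed() below):
 * starting from a one second interval, successive timer reseeds run 1s, 3s,
 * 9s, ..., 2187s apart, after which the interval is clamped to 3600s.
 */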

/*
 * Pool reseed schedule.  Initially, only pool 0 is active.  Until the timer
 * interval reaches INTVL_MAX, only pool 0 is used.
 *
 * After reaching INTVL_MAX, pool k is either activated (if inactive) or used
 * (if active) every 3^k timer reseeds. (§ 3.3)
 *
 * (Entropy harvesting only round-robins across active pools.)
 */
#define	FXENT_RESEED_BASE		3
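
/*
 * Concretely: once the maximum interval is reached, timer reseeds 1 and 2
 * draw from pool 0 only; reseed 3 activates (or draws from) pool 1; reseed 9
 * pool 2; reseed 27 pool 3; and so on.  See fxent_timer_reseed() below.
 */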

/*
 * Number of bytes from high quality sources to allocate to pool 0 before
 * normal round-robin allocation after each timer reseed. (§ 3.4)
 */
#define	FXENT_HI_SRC_POOL0_BYTES	32

/*
 * § 3.1
 *
 * Low sources provide unconditioned entropy, such as mouse movements; high
 * sources are assumed to provide high-quality random bytes.  Pull sources are
 * those which can be polled, i.e., anything randomdev calls a "random_source."
 *
 * In the whitepaper, low sources are pull.  For us, at least in the existing
 * design, low-quality sources push into some global ring buffer and then get
 * forwarded into the RNG by a thread that continually polls.  Presumably their
 * design batches low entropy signals in some way (SHA512?) and only requests
 * them dynamically on reseed.  I'm not sure what the benefit is vs. feeding
 * into the pools directly.
 */
enum fxrng_ent_access_cls {
	FXRNG_PUSH,
	FXRNG_PULL,
};
enum fxrng_ent_source_cls {
	FXRNG_HI,
	FXRNG_LO,
	FXRNG_GARBAGE,
};
struct fxrng_ent_cls {
	enum fxrng_ent_access_cls	entc_axx_cls;
	enum fxrng_ent_source_cls	entc_src_cls;
};
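
/* entc_axx_cls: access class (push vs. pull); entc_src_cls: source quality. */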

static const struct fxrng_ent_cls fxrng_hi_pull = {
	.entc_axx_cls = FXRNG_PULL,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_hi_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_lo_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_LO,
};
static const struct fxrng_ent_cls fxrng_garbage = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_GARBAGE,
};

/*
 * This table is a mapping of randomdev's current source abstractions to the
 * designations above; at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 */
static const struct fxrng_ent_char {
	const struct fxrng_ent_cls	*entc_cls;
} fxrng_ent_char[/*ENTROPYSOURCE*/] = {
	[RANDOM_CACHED] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_ATTACH] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_KEYBOARD] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_MOUSE] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_TUN] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_ETHER] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_NG] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_INTERRUPT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_SWI] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_FS_ATIME] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_UMA] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_CALLOUT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_RANDOMDEV] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_PURE_SAFE] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_GLXSB] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_RDRAND] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RDSEED] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_NEHEMIAH] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RNDTEST] = {
		.entc_cls = &fxrng_garbage,
	},
	[RANDOM_PURE_VIRTIO] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_BROADCOM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_CCP] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_DARN] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_TPM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_VMGENID] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_QUALCOMM] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_ARMV8] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_ARM_TRNG] = {
		.entc_cls = &fxrng_hi_pull,
	},
};
CTASSERT(nitems(fxrng_ent_char) == ENTROPYSOURCE);

/* Useful for single-bit-per-source state. */
BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE);

/* XXX Borrowed from not-yet-committed D22702. */
#ifndef BIT_TEST_SET_ATOMIC_ACQ
#define	BIT_TEST_SET_ATOMIC_ACQ(_s, n, p)	\
	(atomic_testandset_acq_long(		\
	    &(p)->__bits[__bitset_word((_s), (n))], (n)) != 0)
#endif
#define	FXENT_TEST_SET_ATOMIC_ACQ(n, p)		\
	BIT_TEST_SET_ATOMIC_ACQ(ENTROPYSOURCE, n, p)
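
/*
 * Both macros atomically set bit 'n' in bitset 'p' and evaluate true iff the
 * bit was already set; the acquire ordering is what lets the lock-free
 * first-time check in fxrng_event_processor() stand on its own.
 */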

/* For special behavior on first-time entropy sources. (§ 3.1) */
static struct fxrng_bits __read_mostly fxrng_seen;

/* For special behavior for high-entropy sources after a reseed. (§ 3.4) */
_Static_assert(FXENT_HI_SRC_POOL0_BYTES <= UINT8_MAX, "");
static uint8_t __read_mostly fxrng_reseed_seen[ENTROPYSOURCE];

/* Entropy pools.  Lock order is ENT -> RNG(root) -> RNG(leaf). */
static struct mtx fxent_pool_lk;
MTX_SYSINIT(fx_pool, &fxent_pool_lk, "fx entropy pool lock", MTX_DEF);
#define	FXENT_LOCK()		mtx_lock(&fxent_pool_lk)
#define	FXENT_UNLOCK()		mtx_unlock(&fxent_pool_lk)
#define	FXENT_ASSERT(rng)	mtx_assert(&fxent_pool_lk, MA_OWNED)
#define	FXENT_ASSERT_NOT(rng)	mtx_assert(&fxent_pool_lk, MA_NOTOWNED)
static struct fxrng_hash fxent_pool[FXRNG_NPOOLS];
static unsigned __read_mostly fxent_nactpools = 1;
static struct timeout_task fxent_reseed_timer;
static int __read_mostly fxent_timer_ready;

/*
 * Track number of bytes of entropy harvested from high-quality sources prior
 * to initial keying.  The idea is to collect more jitter entropy when fewer
 * high-quality bytes were available and less if we had other good sources.  We
 * want to provide always-on availability but don't necessarily have *any*
 * great sources on some platforms.
 *
 * Like fxrng_ent_char: at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 *
 * Jitter entropy is unimplemented for now.
 */
static unsigned long fxrng_preseed_ent;

void
fxrng_pools_init(void)
{
	size_t i;

	for (i = 0; i < nitems(fxent_pool); i++)
		fxrng_hash_init(&fxent_pool[i]);
}

static inline bool
fxrng_hi_source(enum random_entropy_source src)
{
	return (fxrng_ent_char[src].entc_cls->entc_src_cls == FXRNG_HI);
}

/*
 * A racy check that this high-entropy source's event should contribute to
 * pool0 on the basis of per-source byte count.  The check is racy for two
 * reasons:
 * - Performance: The vast majority of the time, we've already taken 32 bytes
 *   from any present high quality source and the racy check lets us avoid
 *   dirtying the cache for the global array.
 * - Correctness: It's fine that the check is racy.  The failure modes are:
 *   • False positive: We will detect when we take the lock.
 *   • False negative: We still collect the entropy; it just won't be
 *     preferentially placed in pool0 in this case.
 */
static inline bool
fxrng_hi_pool0_eligible_racy(enum random_entropy_source src)
{
	return (atomic_load_acq_8(&fxrng_reseed_seen[src]) <
	    FXENT_HI_SRC_POOL0_BYTES);
}

/*
 * Top level entropy processing API from randomdev.
 *
 * Invoked by the core randomdev subsystem for preload entropy, "push" sources
 * (interrupts, keyboard, etc.), and "pull" sources (RDRAND, etc.).
 */
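/*
 * (For illustration: a push source such as the keyboard driver hands bytes to
 * randomdev via random_harvest_queue(buf, len, RANDOM_KEYBOARD); randomdev
 * packages them into a struct harvest_event before calling here.  The exact
 * entry points live in the randomdev layer, not in this file.)
 */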
void
fxrng_event_processor(struct harvest_event *event)
{
	enum random_entropy_source src;
	unsigned pool;
	bool first_time, first_32;

	src = event->he_source;

	ASSERT_DEBUG(event->he_size <= sizeof(event->he_entropy),
	    "%s: he_size: %u > sizeof(he_entropy): %zu", __func__,
	    (unsigned)event->he_size, sizeof(event->he_entropy));

	/*
	 * Zero bytes of source entropy doesn't count as observing this source
	 * for the first time.  We still harvest the counter entropy.
	 */
	first_time = event->he_size > 0 &&
	    !FXENT_TEST_SET_ATOMIC_ACQ(src, &fxrng_seen);
	if (__predict_false(first_time)) {
		/*
		 * "The first time [any source] provides entropy, it is used to
		 * directly reseed the root PRNG.  The entropy pools are
		 * bypassed." (§ 3.1)
		 *
		 * Unlike Windows, we cannot rely on loader(8) seed material
		 * being present, so we perform initial keying in the kernel.
		 * We use brng_generation 0 to represent an unkeyed state.
		 *
		 * Prior to initial keying, it doesn't make sense to try to mix
		 * the entropy directly with the root PRNG state, as the root
		 * PRNG is unkeyed.  Instead, we collect pre-keying dynamic
		 * entropy in pool0 and do not bump the root PRNG seed version
		 * or set its key.  Initial keying will incorporate pool0 and
		 * bump the brng_generation (seed version).
		 *
		 * After initial keying, we do directly mix in first-time
		 * entropy sources.  We use the root BRNG to generate 32 bytes
		 * and use fxrng_hash to mix it with the new entropy source and
		 * re-key with the first 256 bits of hash output.
		 */
		FXENT_LOCK();
		FXRNG_BRNG_LOCK(&fxrng_root);
		if (__predict_true(fxrng_root.brng_generation > 0)) {
			/* Bypass the pools: */
			FXENT_UNLOCK();
			fxrng_brng_src_reseed(event);
			FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
			return;
		}

		/*
		 * Keying the root PRNG requires both FXENT_LOCK and the PRNG's
		 * lock, so we only need to hold on to the pool lock to prevent
		 * initial keying without this entropy.
		 */
		FXRNG_BRNG_UNLOCK(&fxrng_root);

		/* Root PRNG hasn't been keyed yet, just accumulate event. */
		fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
		    sizeof(event->he_somecounter));
		fxrng_hash_update(&fxent_pool[0], event->he_entropy,
		    event->he_size);

		if (fxrng_hi_source(src)) {
			/* Prevent overflow. */
			if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
				fxrng_preseed_ent += event->he_size;
		}
		FXENT_UNLOCK();
		return;
	}
	/* !first_time */

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * The first-32-byte tracking data in fxrng_reseed_seen is reset in
	 * fxent_timer_reseed_npools() below.
	 */
	first_32 = event->he_size > 0 &&
	    fxrng_hi_source(src) &&
	    atomic_load_acq_int(&fxent_nactpools) > 1 &&
	    fxrng_hi_pool0_eligible_racy(src);
	if (__predict_false(first_32)) {
		unsigned rem, seen;

		FXENT_LOCK();
		seen = fxrng_reseed_seen[src];
		if (seen == FXENT_HI_SRC_POOL0_BYTES)
			goto round_robin;

		rem = FXENT_HI_SRC_POOL0_BYTES - seen;
		rem = MIN(rem, event->he_size);

		fxrng_reseed_seen[src] = seen + rem;

		/*
		 * We put 'rem' bytes in pool0, and any remaining bytes are
		 * round-robin'd across other pools.
		 */
		fxrng_hash_update(&fxent_pool[0],
		    ((uint8_t *)event->he_entropy) + event->he_size - rem,
		    rem);
		if (rem == event->he_size) {
			fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
			    sizeof(event->he_somecounter));
			FXENT_UNLOCK();
			return;
		}

		/*
		 * If fewer bytes were needed than this event provided, we only
		 * take the last 'rem' bytes of the entropy buffer and leave
		 * the timecounter to be round-robin'd with the remaining
		 * entropy.
		 */
		event->he_size -= rem;
		goto round_robin;
	}
	/* !first_32 */

	FXENT_LOCK();

round_robin:
	FXENT_ASSERT();
	pool = event->he_destination % fxent_nactpools;
	fxrng_hash_update(&fxent_pool[pool], event->he_entropy,
	    event->he_size);
	fxrng_hash_update(&fxent_pool[pool], &event->he_somecounter,
	    sizeof(event->he_somecounter));

	if (__predict_false(fxrng_hi_source(src) &&
	    atomic_load_acq_64(&fxrng_root_generation) == 0)) {
		/* Prevent overflow. */
		if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
			fxrng_preseed_ent += event->he_size;
	}
	FXENT_UNLOCK();
}

/*
 * Top level "seeded" API/signal from randomdev.
 *
 * This is our warning that a request is coming: we need to be seeded.  In
 * fenestrasX, a request for random bytes _never_ fails.  "We (ed: Microsoft)
 * have observed that there are many callers that never check for the error
 * code, even if they are generating cryptographic key material." (§ 1.6)
 *
 * If we returned 'false', both read_random(9) and chacha20_randomstir()
 * (arc4random(9)) will blindly charge on with something almost certainly worse
 * than what we've got, or are able to get quickly enough.
 */
bool
fxrng_alg_seeded(void)
{
	uint8_t hash[FXRNG_HASH_SZ];
	sbintime_t sbt;

	/* The vast majority of the time, we expect to already be seeded. */
	if (__predict_true(atomic_load_acq_64(&fxrng_root_generation) != 0))
		return (true);

	/*
	 * Take the lock and recheck; only one thread needs to do the initial
	 * seeding work.
	 */
	FXENT_LOCK();
	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		FXENT_UNLOCK();
		return (true);
	}
	/* XXX Any one-off initial seeding goes here. */

	fxrng_hash_finish(&fxent_pool[0], hash, sizeof(hash));
	fxrng_hash_init(&fxent_pool[0]);

	fxrng_brng_reseed(hash, sizeof(hash));
	FXENT_UNLOCK();

	randomdev_unblock();
	explicit_bzero(hash, sizeof(hash));

	/*
	 * This may be called too early for taskqueue_thread to be initialized.
	 * fxent_pool_timer_init will detect if we've already unblocked and
	 * queue the first timer reseed at that point.
	 */
	if (atomic_load_acq_int(&fxent_timer_ready) != 0) {
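		/*
		 * (As we read taskqueue(9), a negative timeout value asks the
		 * taskqueue not to re-schedule an already-pending enqueue; the
		 * sbt / 3 precision and C_PREL(2) let the callout coalesce
		 * with nearby events.)
		 */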
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	return (true);
}

/*
 * Timer-based reseeds and pool expansion.
 */
static void
fxent_timer_reseed_npools(unsigned n)
{
	/*
	 * 64 * 8 => moderately large 512 bytes.  Could be static, as we are
	 * only used in a static context.  On the other hand, this is in
	 * taskqueue TASK context and we're likely nearly at top of stack
	 * already.
	 */
	uint8_t hash[FXRNG_HASH_SZ * FXRNG_NPOOLS];
	unsigned i;

	ASSERT_DEBUG(n > 0 && n <= FXRNG_NPOOLS, "n:%u", n);

	FXENT_ASSERT();
	/*
	 * Collect entropy from pools 0..n-1 by concatenating the output hashes
	 * and then feeding them into fxrng_brng_reseed, which will hash the
	 * aggregate together with the current root PRNG keystate to produce a
	 * new key.  It will also bump the global generation counter
	 * appropriately.
	 */
	for (i = 0; i < n; i++) {
		fxrng_hash_finish(&fxent_pool[i], hash + i * FXRNG_HASH_SZ,
		    FXRNG_HASH_SZ);
		fxrng_hash_init(&fxent_pool[i]);
	}

	fxrng_brng_reseed(hash, n * FXRNG_HASH_SZ);
	explicit_bzero(hash, n * FXRNG_HASH_SZ);

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * So here we reset the tracking (somewhat naively given the majority
	 * of sources on most machines are not what we consider "high", but at
	 * 32 bytes it's smaller than a cache line), so the next 32 bytes are
	 * prioritized into pool0.
	 *
	 * See corresponding use of fxrng_reseed_seen in fxrng_event_processor.
	 */
	memset(fxrng_reseed_seen, 0, sizeof(fxrng_reseed_seen));
	FXENT_ASSERT();
}

static void
fxent_timer_reseed(void *ctx __unused, int pending __unused)
{
	static unsigned reseed_intvl_sec = 1;
	/* Counts only the reseeds after FXENT_RESEED_INTVL_MAX is reached. */
	static uint64_t reseed_number = 1;

	unsigned next_ival, i, k;
	sbintime_t sbt;

	if (reseed_intvl_sec < FXENT_RESEED_INTVL_MAX) {
		next_ival = FXENT_RESEED_INTVL_GFACT * reseed_intvl_sec;
		if (next_ival > FXENT_RESEED_INTVL_MAX)
			next_ival = FXENT_RESEED_INTVL_MAX;
		FXENT_LOCK();
		fxent_timer_reseed_npools(1);
		FXENT_UNLOCK();
	} else {
		/*
		 * The creation of entropy pools beyond 0 is enabled when the
		 * reseed interval hits the maximum. (§ 3.3)
		 */
		next_ival = reseed_intvl_sec;

		/*
		 * Pool 0 is used on every reseed; pools 0..1 every 3rd reseed;
		 * and in general, pools 0..n every 3^n reseeds.
		 */
		k = reseed_number;
		reseed_number++;

		/* Count how many pools, from [0, i), to use for reseed. */
		for (i = 1; i < MIN(fxent_nactpools + 1, FXRNG_NPOOLS); i++) {
			if ((k % FXENT_RESEED_BASE) != 0)
				break;
			k /= FXENT_RESEED_BASE;
		}
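		/* e.g., reseed_number 9 (= 3^2) yields i = 3: pools [0, 3). */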

		/*
		 * If we haven't activated pool i yet, activate it and only
		 * reseed from [0, i-1). (§ 3.3)
		 */
		FXENT_LOCK();
		if (i == fxent_nactpools + 1) {
			fxent_timer_reseed_npools(fxent_nactpools);
			fxent_nactpools++;
		} else {
			/* Just reseed from [0, i). */
			fxent_timer_reseed_npools(i);
		}
		FXENT_UNLOCK();
	}

	/* Schedule the next reseed. */
	sbt = next_ival * SBT_1S;
	taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer,
	    -sbt, (sbt / 3), C_PREL(2));

	reseed_intvl_sec = next_ival;
}

static void
fxent_pool_timer_init(void *dummy __unused)
{
	sbintime_t sbt;

	TIMEOUT_TASK_INIT(taskqueue_thread, &fxent_reseed_timer, 0,
	    fxent_timer_reseed, NULL);

	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	atomic_store_rel_int(&fxent_timer_ready, 1);
}
/* After taskqueue_thread is initialized in SI_SUB_TASKQ:SI_ORDER_SECOND. */
SYSINIT(fxent_pool_timer_init, SI_SUB_TASKQ, SI_ORDER_ANY,
    fxent_pool_timer_init, NULL);