Lines matching +full:reseed +full:- +full:disable in drivers/char/random.c (Linux kernel)

1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
3 * Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
10 * - Initialization and readiness waiting.
11 * - Fast key erasure RNG, the "crng".
12 * - Entropy accumulation and extraction routines.
13 * - Entropy collection routines.
14 * - Userspace reader/writer interfaces.
15 * - Sysctl interface.
81 * crng_init is protected by base_crng->lock, and only increases
82 * its value (from empty->early->ready).
91 /* Various types of waiters for crng_init->CRNG_READY transition. */
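The three states named above map onto a small enum; current kernels define it essentially as below (comments paraphrased), and crng_init only ever steps forward through these values while holding base_crng.lock:

enum crng_init_t {
	CRNG_EMPTY = 0, /* little to no entropy collected yet */
	CRNG_EARLY = 1, /* at least POOL_EARLY_BITS collected */
	CRNG_READY = 2  /* fully initialized, POOL_READY_BITS collected */
};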
102 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
135 * -ERESTARTSYS if the function was interrupted by a signal.
164 nb->notifier_call(nb, 0, NULL);
183 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
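A minimal sketch of that fast key erasure scheme, with an illustrative chacha_load_key() helper alongside the kernel's chacha20_block() primitive (this shows the idea, not the file's exact crng_fast_key_erasure()): each request generates one keystream block whose first 32 bytes immediately overwrite the key, so a later compromise of the state cannot reconstruct earlier output.

static void fast_key_erasure_sketch(u8 key[32], u8 *out, size_t out_len)
{
	u32 state[16]; /* ChaCha state: constants, key, counter, nonce */
	u8 block[64];  /* one 64-byte keystream block */

	chacha_load_key(state, key);  /* illustrative: constants + key + zeroed counter */
	chacha20_block(state, block); /* produce the keystream block */

	memcpy(key, block, 32);       /* "erasure": the new key replaces the old one */
	if (out_len > 32)
		out_len = 32;         /* at most 32 output bytes per block here */
	memcpy(out, block + 32, out_len);
	memzero_explicit(block, sizeof(block));
}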
200 * functions may be higher performance for one-off random integers,
270 * because the per-cpu crngs are initialized to ULONG_MAX, so this
286 * Cast to unsigned long for 32-bit architectures, since atomic 64-bit
288 * because base_crng.generation is a 32-bit value. On big-endian
293 smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1);
348 * then re-check once locked later. In the case where we're really not
372 * If our per-cpu crng is older than the base_crng, then it means
375 * for our per-cpu crng. This brings us up to date with base_crng.
377 if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
380 crng->key, sizeof(crng->key));
381 crng->generation = base_crng.generation;
386 * Finally, when we've made it this far, our per-cpu crng has an up
392 crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
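Read together, the excerpts above give roughly this per-CPU draw path (a condensed sketch; locking, irq masking, and the vDSO generation publishing are omitted):

static void crng_draw_sketch(struct crng *crng, u8 *out, size_t len)
{
	u32 chacha_state[16];

	/* Catch up if base_crng has reseeded since our last draw. */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
	}

	/* Now up to date: erase our own key while producing output. */
	crng_fast_key_erasure(crng->key, chacha_state, out, len);
}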
407 len -= first_block_len;
421 len -= CHACHA_BLOCK_SIZE;
488 return ret ? ret : -EFAULT;
536 if (batch->position >= ARRAY_SIZE(batch->entropy) || \
537 next_gen != batch->generation) { \
538 _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
539 batch->position = 0; \
540 batch->generation = next_gen; \
543 ret = batch->entropy[batch->position]; \
544 batch->entropy[batch->position] = 0; \
545 ++batch->position; \
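The backslash-continued lines above come from a macro that stamps out one batch per integer size (u8/u16/u32/u64). De-macro-ized for u64, and with the per-CPU locking stripped, it reads roughly (batch size illustrative):

struct batch_u64_sketch {
	u64 entropy[16];          /* illustrative capacity */
	unsigned long generation; /* base_crng generation at fill time */
	unsigned int position;    /* next unused slot */
};

static u64 get_random_u64_sketch(struct batch_u64_sketch *batch)
{
	unsigned long next_gen = READ_ONCE(base_crng.generation);
	u64 ret;

	/* Refill when exhausted or when the crng has reseeded underneath us. */
	if (batch->position >= ARRAY_SIZE(batch->entropy) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy, sizeof(batch->entropy));
		batch->position = 0;
		batch->generation = next_gen;
	}

	ret = batch->entropy[batch->position];
	batch->entropy[batch->position] = 0; /* zero what we hand out */
	++batch->position;
	return ret;
}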
564 * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
565 * in 32-bits.
572 * for the non-underscored constant version in the header, we build bug
573 * on that. But for the non-constant case, it's convenient to have that
583 u32 bound = -ceil % ceil;
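These lines are the heart of the bounded-integer path; reconstructed, the function is a multiply-and-shift with rejection: (u64)ceil * rand >> 32 maps a sample into [0, ceil), and the rare samples falling into the 2^32 mod ceil remainder are retried, with -ceil % ceil computing that remainder entirely in 32 bits:

u32 random_below_sketch(u32 ceil)
{
	u32 rand = get_random_u32();
	u64 mult;

	if (unlikely(!ceil))
		return rand; /* degenerate case noted in the header comment */

	mult = (u64)ceil * rand;
	if (unlikely((u32)mult < ceil)) {
		u32 bound = -ceil % ceil; /* == 2^32 % ceil */

		/* Reject samples whose low half lands in the remainder. */
		while (unlikely((u32)mult < bound))
			mult = (u64)ceil * get_random_u32();
	}
	return mult >> 32;
}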
600 * the per-cpu crng and all batches, so that we serve fresh
603 per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
604 per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
605 per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
606 per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
607 per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
633 POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
634 POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
669 * This is an HKDF-like construction for using the hashed collected entropy
670 * as a PRF key, that's then expanded block-by-block.
683 longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
688 longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
714 len -= i;
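A condensed sketch of that extract-then-expand flow using the kernel's keyed BLAKE2s helpers; the real extract_entropy() additionally stirs fresh arch_get_random_seed_longs()/arch_get_random_longs() words (the rdseed lines above) into each expansion input:

static void extract_entropy_sketch(u8 *buf, size_t len)
{
	u8 seed[32], next_key[32];
	u64 counter = 0;

	/* Extract: seed = H(pool contents so far). */
	blake2s_final(&input_pool.hash, seed);

	/* Re-key the pool: next_key = PRF(seed, counter = 0). */
	blake2s(next_key, (u8 *)&counter, seed,
		sizeof(next_key), sizeof(counter), sizeof(seed));
	blake2s_init_key(&input_pool.hash, sizeof(seed),
			 next_key, sizeof(next_key));

	/* Expand: out_i = PRF(seed, ++counter), block by block. */
	while (len) {
		size_t i = len < sizeof(seed) ? len : sizeof(seed);

		++counter;
		blake2s(buf, (u8 *)&counter, seed,
			i, sizeof(counter), sizeof(seed));
		buf += i;
		len -= i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(next_key, sizeof(next_key));
}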
746 WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true);
784 * read-out of the RTC. This does *not* credit any actual entropy to
799 * and then force-reseeds the crng so that it takes effect immediately.
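A typical caller of this interface, sketched (the device and its fields are made up): drivers stir identifying but predictable hardware data into the pool at probe time, precisely because it costs nothing and is never credited as entropy.

/* Hypothetical driver probe mixing in a serial number and MAC address. */
static int example_probe(struct example_dev *dev)
{
	add_device_randomness(dev->serial, sizeof(dev->serial));
	add_device_randomness(dev->mac_addr, sizeof(dev->mac_addr));
	return 0;
}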
810 * layer request events, on a per-disk_devt basis, as input to the
811 * entropy pool. Note that high-speed solid state drives with very low
876 longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
882 longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
888 arch_bits -= sizeof(*entropy) * 8;
895 /* Reseed if already seeded by earlier phases. */
923 /* Reseed if already seeded by earlier phases. */
934 * Add device- or boot-specific data to the input pool to help
954 * Interface for in-kernel drivers of true hardware RNGs. Those devices
964 * Throttle writing to once every reseed interval, unless we're not yet
988 * don't credit it, but we do immediately force a reseed after so
1038 * This is [Half]SipHash-1-x, starting from an empty key. Because
1039 * the key is fixed, it assumes that its inputs are non-malicious,
1041 * four-word SipHash state, while v represents a two-word input.
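On 64-bit, the mixing step is the standard SipHash round applied once per input word; a sketch of the scheme (32-bit kernels use HalfSipHash with 32-bit words and different rotation constants):

#define SIPROUND_SKETCH(v0, v1, v2, v3) do { \
	v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
	v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
	v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
	v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
} while (0)

/* s is the four-word state; v1/v2 are the two input words
 * (e.g. cycle counter and return address of the interrupt). */
static void fast_mix_sketch(unsigned long s[4], unsigned long v1,
			    unsigned long v2)
{
	s[3] ^= v1;
	SIPROUND_SKETCH(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	SIPROUND_SKETCH(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}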
1071 per_cpu_ptr(&irq_randomness, cpu)->count = 0;
1100 memcpy(pool, fast_pool->pool, sizeof(pool));
1101 count = fast_pool->count;
1102 fast_pool->count = 0;
1103 fast_pool->last = jiffies;
1120 fast_mix(fast_pool->pool, entropy,
1122 new_count = ++fast_pool->count;
1127 if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
1130 fast_pool->count |= MIX_INFLIGHT;
1131 if (!timer_pending(&fast_pool->mix)) {
1132 fast_pool->mix.expires = jiffies;
1133 add_timer_on(&fast_pool->mix, raw_smp_processor_id());
1162 fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
1175 * We take into account the first, second and third-order deltas
1178 delta = now - READ_ONCE(state->last_time);
1179 WRITE_ONCE(state->last_time, now);
1181 delta2 = delta - READ_ONCE(state->last_delta);
1182 WRITE_ONCE(state->last_delta, delta);
1184 delta3 = delta2 - READ_ONCE(state->last_delta2);
1185 WRITE_ONCE(state->last_delta2, delta2);
1188 delta = -delta;
1190 delta2 = -delta2;
1192 delta3 = -delta3;
1212 this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
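The deltas feed a deliberately conservative credit estimate: only the smallest of the three chains is trusted, and even that is rounded down and capped. Roughly (the cap of 11 bits matches current kernels):

static unsigned int timer_credit_sketch(long delta, long delta2, long delta3)
{
	/* Work with magnitudes, as in the sign-flip lines above. */
	if (delta < 0) delta = -delta;
	if (delta2 < 0) delta2 = -delta2;
	if (delta3 < 0) delta3 = -delta3;

	/* Trust only the smallest of the three difference chains. */
	if (delta > delta2) delta = delta2;
	if (delta > delta3) delta = delta3;

	/* Round down by one bit on general principles, cap at 11 bits. */
	return min(fls((unsigned int)(delta >> 1)), 11);
}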
1235 if (!disk || !disk->random)
1238 add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1252 state->last_time = INITIAL_JIFFIES;
1253 disk->random = state;
1270 * Note that we don't re-arm the timer in the timer itself - we are happy to be
1274 * So the re-arming always happens in the entropy loop itself.
1282 if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0)
1293 u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
1297 int cpu = -1;
1299 for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
1300 stack->entropy = random_get_entropy();
1301 if (stack->entropy != last)
1303 last = stack->entropy;
1305 stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
1306 if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
1309 atomic_set(&stack->samples, 0);
1310 timer_setup_on_stack(&stack->timer, entropy_timer, 0);
1316 if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
1335 /* Basic CPU round-robin, which avoids the current CPU. */
1343 stack->timer.expires = jiffies;
1345 add_timer_on(&stack->timer, cpu);
1349 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
1351 stack->entropy = random_get_entropy();
1353 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
1355 del_timer_sync(&stack->timer);
1356 destroy_timer_on_stack(&stack->timer);
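Condensed, the jitter dance excerpted above is: arm an immediate timer on a different CPU, spin sampling the cycle counter into the pool, and let the timer's asynchronous firing both perturb the samples and (via entropy_timer above) credit them. A sketch of the loop, with CPU selection hidden behind an illustrative pick_other_cpu():

while (!crng_ready() && !signal_pending(current)) {
	/* Keep exactly one shot in flight on some other CPU. */
	if (!timer_pending(&stack->timer) &&
	    try_to_del_timer_sync(&stack->timer) >= 0) {
		stack->timer.expires = jiffies; /* fire as soon as possible */
		add_timer_on(&stack->timer, pick_other_cpu()); /* illustrative */
	}

	mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
	schedule(); /* give the timer a chance to interrupt us */
	stack->entropy = random_get_entropy(); /* raw cycle counter sample */
}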
1394 return -EINVAL;
1401 return -EINVAL;
1405 return -EAGAIN;
1448 return ret ? ret : -EFAULT;
1471 --maxwarn;
1473 current->comm, iov_iter_count(iter));
1485 ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
1486 (kiocb->ki_filp->f_flags & O_NONBLOCK)))
1487 return -EAGAIN;
1504 return -EFAULT;
1508 return -EPERM;
1510 return -EFAULT;
1512 return -EINVAL;
1521 return -EPERM;
1523 return -EFAULT;
1525 return -EINVAL;
1527 return -EFAULT;
1536 return -EFAULT;
1544 return -EPERM;
1548 return -EPERM;
1550 return -ENODATA;
1554 return -EINVAL;
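Several of the error returns above come from random_ioctl(). A userspace sketch of the privileged RNDADDENTROPY path, which credits externally gathered bytes and accounts for the -EPERM cases (CAP_SYS_ADMIN is required):

#include <fcntl.h>
#include <linux/random.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int credit_entropy_sketch(const void *buf, int len)
{
	struct rand_pool_info *info;
	int fd, ret = -1;

	if (len <= 0)
		return -1;
	info = malloc(sizeof(*info) + len);
	if (!info)
		return -1;
	info->entropy_count = len * 8; /* bits being credited */
	info->buf_size = len;          /* bytes that follow the header */
	memcpy(info->buf, buf, len);

	fd = open("/dev/urandom", O_WRONLY);
	if (fd >= 0) {
		ret = ioctl(fd, RNDADDENTROPY, info); /* -EPERM w/o CAP_SYS_ADMIN */
		close(fd);
	}
	free(info);
	return ret;
}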
1595 * - boot_id - a UUID representing the current boot.
1597 * - uuid - a random UUID, different each time the file is read.
1599 * - poolsize - the number of bits of entropy that the input pool can
1602 * - entropy_avail - the number of bits of entropy currently in the
1605 * - write_wakeup_threshold - the amount of entropy in the input pool
1611 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
1628 * UUID. The difference is in whether table->data is NULL; if it is,
1642 return -EPERM;
1644 uuid = table->data;
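And the userspace view of the distinction handled here: each read of .../uuid returns a fresh random UUID, while .../boot_id is generated once and stays constant for the whole boot. A small sketch:

#include <stdio.h>

/* Read one line from a proc file, e.g.
 * "/proc/sys/kernel/random/uuid" or ".../boot_id". */
static int read_random_proc(const char *path, char *out, int len)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(out, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}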