// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
 *
 * This driver produces cryptographically secure pseudorandom data. It is divided
 * into roughly six sections, each with a section header:
 *
 *   - Initialization and readiness waiting.
 *   - Fast key erasure RNG, the "crng".
 *   - Entropy accumulation and extraction routines.
 *   - Entropy collection routines.
 *   - Userspace reader/writer interfaces.
 *   - Sysctl interface.
 *
 * The high level overview is that there is one input pool, into which
 * various pieces of data are hashed. Prior to initialization, some of that
 * data is then "credited" as having a certain number of bits of entropy.
 * When enough bits of entropy are available, the hash is finalized and
 * handed as a key to a stream cipher that expands it indefinitely for
 * various consumers. This key is periodically refreshed as the various
 * entropy collectors, described below, add data to the input pool.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <linux/siphash.h>
#include <linux/sched/isolation.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#ifdef CONFIG_VDSO_GETRANDOM
#include <vdso/getrandom.h>
#include <vdso/datapage.h>
#include <vdso/vsyscall.h>
#endif
#include <asm/archrandom.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/

/*
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from empty->early->ready).
 */
static enum {
	CRNG_EMPTY = 0, /* Little to no entropy collected */
	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);

/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
static int ratelimit_disable __read_mostly = 0;
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
 * u16,u32,u64,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

static void __cold crng_set_ready(struct work_struct *work)
{
	static_branch_enable(&crng_is_ready);
}

/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);

/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
 * long} family of functions. Using any of these functions without first
 * calling this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	while (!crng_ready()) {
		int ret;

		try_to_generate_entropy();
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;
	}
	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

/*
 * Add a callback function that will be invoked when the crng is initialised,
 * or immediately if it already has been. Only use this if you are absolutely
 * sure it is required. Most users should instead be able to test
 * `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`.
 */
int __cold execute_with_initialized_rng(struct notifier_block *nb)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&random_ready_notifier.lock, flags);
	if (crng_ready())
		nb->notifier_call(nb, 0, NULL);
	else
		ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
	spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
	return ret;
}

/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
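 *
 * "Fast key erasure" means that each time a key is used to generate a
 * block, the first 32 bytes of that block immediately replace the key
 * itself, so a later compromise of the generator state does not reveal
 * output that has already been handed out.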
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *	void get_random_bytes(void *buf, size_t len)
 *	u8 get_random_u8()
 *	u16 get_random_u16()
 *	u32 get_random_u32()
 *	u32 get_random_u32_below(u32 ceil)
 *	u32 get_random_u32_above(u32 floor)
 *	u32 get_random_u32_inclusive(u32 floor, u32 ceil)
 *	u64 get_random_u64()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The u8, u16, u32, u64, long family of
 * functions may be higher performance for one-off random integers,
 * because they do a bit of buffering and do not invoke reseeding
 * until the buffer is emptied.
 *
 *********************************************************************/

enum {
	CRNG_RESEED_START_INTERVAL = HZ,
	CRNG_RESEED_INTERVAL = 60 * HZ
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

/*
 * Return the interval until the next reseeding, which is normally
 * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
 * proportional to the uptime.
 */
static unsigned int crng_reseed_interval(void)
{
	static bool early_boot = true;

	if (unlikely(READ_ONCE(early_boot))) {
		time64_t uptime = ktime_get_seconds();
		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
			WRITE_ONCE(early_boot, false);
		else
			return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
				     (unsigned int)uptime / 2 * HZ);
	}
	return CRNG_RESEED_INTERVAL;
}

/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);

/* This extracts a new crng key from the input pool. */
static void crng_reseed(struct work_struct *work)
{
	static DECLARE_DELAYED_WORK(next_reseed, crng_reseed);
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];

	/* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
	if (likely(system_dfl_wq))
		queue_delayed_work(system_dfl_wq, &next_reseed, crng_reseed_interval());

	extract_entropy(key, sizeof(key));

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
#ifdef CONFIG_VDSO_GETRANDOM
	/* base_crng.generation's invalid value is ULONG_MAX, while
	 * vdso_k_rng_data->generation's invalid value is 0, so add one to the
	 * former to arrive at the latter. Use smp_store_release so that this
	 * is ordered with the write above to base_crng.generation.
	 * Pairs with the smp_rmb() before the syscall in the vDSO code.
	 *
	 * Cast to unsigned long for 32-bit architectures, since atomic 64-bit
	 * operations are not supported on those architectures. This is safe
	 * because base_crng.generation is a 32-bit value. On big-endian
	 * architectures it will be stored in the upper 32 bits, but that's okay
	 * because the vDSO side only checks whether the value changed, without
	 * actually using or interpreting the value.
	 */
	smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1);
#endif
	if (!static_branch_likely(&crng_is_ready))
		crng_init = CRNG_READY;
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));
}

/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 *
 * The returned ChaCha state contains within it a copy of the old
 * key value, at index 4, so the state should always be zeroed out
 * immediately after using in order to maintain forward secrecy.
 * If the state cannot be erased in a timely manner, then it is
 * safer to set the random_data parameter to &chacha_state->x[4]
 * so that this function overwrites it before returning.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  struct chacha_state *chacha_state,
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state->x[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}

/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(struct chacha_state *chacha_state,
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, extracting
	 * when crng_init is CRNG_EMPTY.
	 */
	if (!crng_ready()) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready) {
			if (crng_init == CRNG_EMPTY)
				extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng.
	 * In that case, we do fast key erasure on the base_crng, and use its
	 * output as the new key for our per-cpu crng. This brings us up to
	 * date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}

static void _get_random_bytes(void *buf, size_t len)
{
	struct chacha_state chacha_state;
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t first_block_len;

	if (!len)
		return;

	first_block_len = min_t(size_t, 32, len);
	crng_make_state(&chacha_state, buf, first_block_len);
	len -= first_block_len;
	buf += first_block_len;

	while (len) {
		if (len < CHACHA_BLOCK_SIZE) {
			chacha20_block(&chacha_state, tmp);
			memcpy(buf, tmp, len);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(&chacha_state, buf);
		if (unlikely(chacha_state.x[12] == 0))
			++chacha_state.x[13];
		len -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	chacha_zeroize_state(&chacha_state);
}

/*
 * This returns random bytes in arbitrary quantities. The quality of the
 * random bytes is as good as /dev/urandom. In order to ensure that the
 * randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, size_t len)
{
	_get_random_bytes(buf, len);
}
EXPORT_SYMBOL(get_random_bytes);

static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
	struct chacha_state chacha_state;
	u8 block[CHACHA_BLOCK_SIZE];
	size_t ret = 0, copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	/*
	 * Immediately overwrite the ChaCha key at index 4 with random
	 * bytes, in case userspace causes copy_to_iter() below to sleep
	 * forever, so that we still retain forward secrecy in that case.
	 */
	crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4],
			CHACHA_KEY_SIZE);
	/*
	 * However, if we're doing a read of len <= 32, we don't need to
	 * use chacha_state after, so we can simply return those bytes to
	 * the user directly.
	 */
	if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
		ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter);
		goto out_zero_chacha;
	}

	for (;;) {
		chacha20_block(&chacha_state, block);
		if (unlikely(chacha_state.x[12] == 0))
			++chacha_state.x[13];

		copied = copy_to_iter(block, sizeof(block), iter);
		ret += copied;
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
out_zero_chacha:
	chacha_zeroize_state(&chacha_state);
	return ret ? ret : -EFAULT;
}

/*
 * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the randomness
 * provided by this function is okay, the function wait_for_random_bytes()
 * should be called and return 0 at least once at any point prior.
 */

#define DEFINE_BATCHED_ENTROPY(type) \
struct batch_ ##type { \
	/* \
	 * We make this 1.5x a ChaCha block, so that we get the \
	 * remaining 32 bytes from fast key erasure, plus one full \
	 * block from the detached ChaCha state. We can increase \
	 * the size of this later if needed so long as we keep the \
	 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
	 */ \
	type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
	local_lock_t lock; \
	unsigned long generation; \
	unsigned int position; \
}; \
\
static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
	.lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
	.position = UINT_MAX \
}; \
\
type get_random_ ##type(void) \
{ \
	type ret; \
	unsigned long flags; \
	struct batch_ ##type *batch; \
	unsigned long next_gen; \
\
	if (!crng_ready()) { \
		_get_random_bytes(&ret, sizeof(ret)); \
		return ret; \
	} \
\
	local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
	batch = raw_cpu_ptr(&batched_entropy_##type); \
\
	next_gen = READ_ONCE(base_crng.generation); \
	if (batch->position >= ARRAY_SIZE(batch->entropy) || \
	    next_gen != batch->generation) { \
		_get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
		batch->position = 0; \
		batch->generation = next_gen; \
	} \
\
	ret = batch->entropy[batch->position]; \
	batch->entropy[batch->position] = 0; \
	++batch->position; \
	local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
	return ret; \
} \
EXPORT_SYMBOL(get_random_ ##type);

DEFINE_BATCHED_ENTROPY(u8)
DEFINE_BATCHED_ENTROPY(u16)
DEFINE_BATCHED_ENTROPY(u32)
DEFINE_BATCHED_ENTROPY(u64)

u32 __get_random_u32_below(u32 ceil)
{
	/*
	 * This is the slow path for variable ceil. It is still fast, most of
	 * the time, by doing traditional reciprocal multiplication and
	 * opportunistically comparing the lower half to ceil itself, before
	 * falling back to computing a larger bound, and then rejecting samples
	 * whose lower half would indicate a range indivisible by ceil. The use
	 * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
	 * in 32-bits.
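	 *
	 * For example, with ceil == 10, bound == 2^32 % 10 == 6: exactly 6
	 * of the 2^32 possible values of rand yield a product whose lower
	 * half is below 6 and are resampled, which leaves every one of the
	 * 10 possible outputs with the same number of accepted inputs.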
	 */
	u32 rand = get_random_u32();
	u64 mult;

	/*
	 * This function is technically undefined for ceil == 0, and in fact
	 * for the non-underscored constant version in the header, we build bug
	 * on that. But for the non-constant case, it's convenient to have that
	 * evaluate to being a straight call to get_random_u32(), so that
	 * get_random_u32_inclusive() can work over its whole range without
	 * undefined behavior.
	 */
	if (unlikely(!ceil))
		return rand;

	mult = (u64)ceil * rand;
	if (unlikely((u32)mult < ceil)) {
		u32 bound = -ceil % ceil;
		while (unlikely((u32)mult < bound))
			mult = (u64)ceil * get_random_u32();
	}
	return mult >> 32;
}
EXPORT_SYMBOL(__get_random_u32_below);

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int __cold random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate both
	 * the per-cpu crng and all batches, so that we serve fresh
	 * randomness.
	 */
	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
	per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
	return 0;
}
#endif


/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *	static void mix_pool_bytes(const void *buf, size_t len)
 *
 * After which, if added entropy should be credited:
 *
 *	static void credit_init_bits(size_t bits)
 *
 * Finally, extract entropy via:
 *
 *	static void extract_entropy(void *buf, size_t len)
 *
 **********************************************************************/

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
	POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
};

static struct {
	struct blake2s_ctx hash;
	spinlock_t lock;
	unsigned int init_bits;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void _mix_pool_bytes(const void *buf, size_t len)
{
	blake2s_update(&input_pool.hash, buf, len);
}

/*
 * This function adds bytes into the input pool. It does not
 * update the initialization bit counter; the caller should call
 * credit_init_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *buf, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
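 *
 * In KDF terms, finalizing the pool hash below is the "extract" step,
 * and the keyed hashes over the RDSEED-and-counter block are the
 * "expand" step, with the counter == 0 output fed back in as the key
 * for the pool's next accumulation cycle.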
 */
static void extract_entropy(void *buf, size_t len)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i, longs;

	for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
		longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
		if (longs) {
			i += longs;
			continue;
		}
		longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
		if (longs) {
			i += longs;
			continue;
		}
		block.rdseed[i++] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), next_key, sizeof(next_key));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (len) {
		i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), buf, i);
		len -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}

#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)

static void __cold _credit_init_bits(size_t bits)
{
	static DECLARE_WORK(set_ready, crng_set_ready);
	unsigned int new, orig, add;
	unsigned long flags;
	int m;

	if (!bits)
		return;

	add = min_t(size_t, bits, POOL_BITS);

	orig = READ_ONCE(input_pool.init_bits);
	do {
		new = min_t(unsigned int, POOL_BITS, orig + add);
	} while (!try_cmpxchg(&input_pool.init_bits, &orig, new));

	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
		crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
		if (system_dfl_wq)
			queue_work(system_dfl_wq, &set_ready);
		atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
		WRITE_ONCE(vdso_k_rng_data->is_ready, true);
#endif
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		m = ratelimit_state_get_miss(&urandom_warning);
		if (m)
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
		spin_lock_irqsave(&base_crng.lock, flags);
		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
		if (crng_init == CRNG_EMPTY) {
			extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_init = CRNG_EARLY;
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
	}
}


/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *	void add_device_randomness(const void *buf, size_t len);
 *	void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
 *	void add_bootloader_randomness(const void *buf, size_t len);
 *	void add_vmfork_randomness(const void *unique_vm_id, size_t len);
 *	void add_interrupt_randomness(int irq);
 *	void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() adds data to the input pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* credit any actual entropy to
 * the pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
 * and device tree, and credits its input depending on whether or not the
 * command line option 'random.trust_bootloader' is set.
 *
 * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
 * representing the current instance of a VM to the pool, without crediting,
 * and then force-reseeds the crng so that it takes effect immediately.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the input pool roughly once a second or after 64
 * interrupts, crediting 1 bit of entropy for whichever comes first.
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * The last two routines try to estimate how many bits of entropy
 * to credit. They do this by keeping track of the first and second
 * order deltas of the event timings.
 *
 **********************************************************************/

static bool trust_cpu __initdata = true;
static bool trust_bootloader __initdata = true;
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
static int __init parse_trust_bootloader(char *arg)
{
	return kstrtobool(arg, &trust_bootloader);
}
early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);

static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
	unsigned long flags, entropy = random_get_entropy();

	/*
	 * Encode a representation of how long the system has been suspended,
	 * in a way that is distinct from prior system suspends.
	 */
	ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&action, sizeof(action));
	_mix_pool_bytes(stamps, sizeof(stamps));
	_mix_pool_bytes(&entropy, sizeof(entropy));
	spin_unlock_irqrestore(&input_pool.lock, flags);

	if (crng_ready() && (action == PM_RESTORE_PREPARE ||
	    (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
	     !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
		crng_reseed(NULL);
		pr_notice("crng reseeded on system resumption\n");
	}
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };

/*
 * This is called extremely early, before time keeping functionality is
 * available, but arch randomness is. Interrupts are not yet enabled.
 */
void __init random_init_early(const char *command_line)
{
	unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
	size_t i, longs, arch_bits;

#if defined(LATENT_ENTROPY_PLUGIN)
	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif

	for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
		longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
		if (longs) {
			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
			i += longs;
			continue;
		}
		longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
		if (longs) {
			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
			i += longs;
			continue;
		}
		arch_bits -= sizeof(*entropy) * 8;
		++i;
	}

	_mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
	_mix_pool_bytes(command_line, strlen(command_line));

	/* Reseed if already seeded by earlier phases. */
	if (crng_ready())
		crng_reseed(NULL);
	else if (trust_cpu)
		_credit_init_bits(arch_bits);
}

/*
 * This is called a little bit after the prior function, and now there is
 * access to timestamp counters. Interrupts are not yet enabled.
 */
void __init random_init(void)
{
	unsigned long entropy = random_get_entropy();
	ktime_t now = ktime_get_real();

	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(&entropy, sizeof(entropy));
	add_latent_entropy();

	/*
	 * If we were initialized by the cpu or bootloader before workqueues
	 * are initialized, then we should enable the static branch here.
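	 *
	 * (_credit_init_bits() only queues the crng_set_ready() work on the
	 * one transition to POOL_READY_BITS, and only if a workqueue was
	 * available at that moment, so nothing else would flip it later.)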
	 */
	if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
		crng_set_ready(NULL);

	/* Reseed if already seeded by earlier phases. */
	if (crng_ready())
		crng_reseed(NULL);

	WARN_ON(register_pm_notifier(&pm_notifier));

	WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
		       "entropy collection will consequently suffer.");
}

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t len)
{
	unsigned long entropy = random_get_entropy();
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&entropy, sizeof(entropy));
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

/*
 * Interface for in-kernel drivers of true hardware RNGs. Those devices
 * may produce endless random bits, so this function will sleep for
 * some amount of time after, if the sleep_after parameter is true.
 */
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after)
{
	mix_pool_bytes(buf, len);
	credit_init_bits(entropy);

	/*
	 * Throttle writing to once every reseed interval, unless we're not yet
	 * initialized or no entropy is credited.
	 */
	if (sleep_after && !kthread_should_stop() && (crng_ready() || !entropy))
		schedule_timeout_interruptible(crng_reseed_interval());
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

/*
 * Handle random seed passed by bootloader, and credit it depending
 * on the command line option 'random.trust_bootloader'.
 */
void __init add_bootloader_randomness(const void *buf, size_t len)
{
	mix_pool_bytes(buf, len);
	if (trust_bootloader)
		credit_init_bits(len * 8);
}

#if IS_ENABLED(CONFIG_VMGENID)
static BLOCKING_NOTIFIER_HEAD(vmfork_chain);

/*
 * Handle a new unique VM ID, which is unique, not secret, so we
 * don't credit it, but we do immediately force a reseed after so
 * that it's used by the crng posthaste.
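 *
 * Without the forced reseed, two VMs resumed from the same snapshot
 * could keep producing identical "random" streams until the next
 * scheduled reseed interval elapsed.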
 */
void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
{
	add_device_randomness(unique_vm_id, len);
	if (crng_ready()) {
		crng_reseed(NULL);
		pr_notice("crng reseeded due to virtual machine fork\n");
	}
	blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
}
#if IS_MODULE(CONFIG_VMGENID)
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif

int __cold register_random_vmfork_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);

int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif

struct fast_pool {
	unsigned long pool[4];
	unsigned long last;
	unsigned int count;
	struct timer_list mix;
};

static void mix_interrupt_randomness(struct timer_list *work);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
	.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
	.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
	.mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};

/*
 * This is [Half]SipHash-1-x, starting from an empty key. Because
 * the key is fixed, it assumes that its inputs are non-malicious,
 * and therefore this has no security on its own. s represents the
 * four-word SipHash state, while v represents a two-word input.
 */
static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
	s[3] ^= v1;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU has just come online, with
 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
 */
int __cold random_online_cpu(unsigned int cpu)
{
	/*
	 * During CPU shutdown and before CPU onlining, add_interrupt_
	 * randomness() may schedule mix_interrupt_randomness(), and
	 * set the MIX_INFLIGHT flag. However, because the worker can
	 * be scheduled on a different CPU during this period, that
	 * flag will never be cleared. For that reason, we zero out
	 * the flag here, which runs just after workqueues are onlined
	 * for the CPU again. This also has the effect of setting the
	 * irq randomness count to zero so that new accumulated irqs
	 * are fresh.
	 */
	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
	return 0;
}
#endif

static void mix_interrupt_randomness(struct timer_list *work)
{
	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
	/*
	 * The size of the copied stack pool is explicitly 2 longs so that we
	 * only ever ingest half of the siphash output each time, retaining
	 * the other half as the next "key" that carries over. The entropy is
	 * supposed to be sufficiently dispersed between bits so on average
	 * we don't wind up "losing" some.
	 */
	unsigned long pool[2];
	unsigned int count;

	/* Check to see if we're running on the wrong CPU due to hotplug. */
	local_irq_disable();
	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
		local_irq_enable();
		return;
	}

	/*
	 * Copy the pool to the stack so that the mixer always has a
	 * consistent view, before we reenable irqs again.
	 */
	memcpy(pool, fast_pool->pool, sizeof(pool));
	count = fast_pool->count;
	fast_pool->count = 0;
	fast_pool->last = jiffies;
	local_irq_enable();

	mix_pool_bytes(pool, sizeof(pool));
	credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));

	memzero_explicit(pool, sizeof(pool));
}

void add_interrupt_randomness(int irq)
{
	enum { MIX_INFLIGHT = 1U << 31 };
	unsigned long entropy = random_get_entropy();
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned int new_count;

	fast_mix(fast_pool->pool, entropy,
		 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
	new_count = ++fast_pool->count;

	if (new_count & MIX_INFLIGHT)
		return;

	if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
		return;

	fast_pool->count |= MIX_INFLIGHT;
	if (!timer_pending(&fast_pool->mix)) {
		fast_pool->mix.expires = jiffies;
		add_timer_on(&fast_pool->mix, raw_smp_processor_id());
	}
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

/* There is one of these per entropy source */
struct timer_rand_state {
	unsigned long last_time;
	long last_delta, last_delta2;
};

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool. The
 * value "num" is also added to the pool; it should somehow describe
 * the type of event that just happened.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	unsigned long entropy = random_get_entropy(), now = jiffies, flags;
	long delta, delta2, delta3;
	unsigned int bits;

	/*
	 * If we're in a hard IRQ, add_interrupt_randomness() will be called
	 * sometime after, so mix into the fast pool.
	 */
	if (in_hardirq()) {
		fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
	} else {
		spin_lock_irqsave(&input_pool.lock, flags);
		_mix_pool_bytes(&entropy, sizeof(entropy));
		_mix_pool_bytes(&num, sizeof(num));
		spin_unlock_irqrestore(&input_pool.lock, flags);
	}

	if (crng_ready())
		return;

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
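	 *
	 * For example, events at 100, 140, 200 and 220 jiffies give, at the
	 * last event, delta = 20, delta2 = 20 - 60 = -40 and delta3 = -40 -
	 * 20 = -60; the smallest absolute value, 20, is then shifted right
	 * once and credited as fls(10) = 4 bits.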
	 */
	delta = now - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, now);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta. Round down by 1 bit
	 * on general principles, and limit entropy estimate to 11 bits.
	 */
	bits = min(fls(delta >> 1), 11);

	/*
	 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
	 * will run after this, which uses a different crediting scheme of 1 bit
	 * per every 64 interrupts. In order to let that function do accounting
	 * close to the one in this function, we credit a full 64/64 bit per bit,
	 * and then subtract one to account for the extra one added.
	 */
	if (in_hardirq())
		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
	else
		_credit_init_bits(bits);
}

void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
	static unsigned char last_value;
	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };

	/* Ignore autorepeat and the like. */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* First major is 1, so we get >= 0x200 here. */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);

void __cold rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc_obj(struct timer_rand_state);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

struct entropy_timer_state {
	unsigned long entropy;
	struct timer_list timer;
	atomic_t samples;
	unsigned int samples_per_bit;
};

/*
 * Each time the timer fires, we expect that we got an unpredictable jump in
 * the cycle counter. Even if the timer is running on another CPU, the timer
 * activity will be touching the stack of the CPU that is generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are happy to be
 * scheduled away, since that just makes the load more complex, but we do not
 * want the timer to keep ticking unless the entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
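 *
 * How many timer samples are required per credited bit is decided by the
 * calibration loop at the start of try_to_generate_entropy(), based on how
 * often the cycle counter is seen to change between back-to-back reads.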
 */
static void __cold entropy_timer(struct timer_list *timer)
{
	struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
	unsigned long entropy = random_get_entropy();

	mix_pool_bytes(&entropy, sizeof(entropy));
	if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0)
		credit_init_bits(1);
}

/*
 * If we have an actual cycle counter, see if we can generate enough entropy
 * with timing noise.
 */
static void __cold try_to_generate_entropy(void)
{
	enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
	u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
	struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
	unsigned int i, num_different = 0;
	unsigned long last = random_get_entropy();
	cpumask_var_t timer_cpus;
	int cpu = -1;

	for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
		stack->entropy = random_get_entropy();
		if (stack->entropy != last)
			++num_different;
		last = stack->entropy;
	}
	stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
	if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
		return;

	atomic_set(&stack->samples, 0);
	timer_setup_on_stack(&stack->timer, entropy_timer, 0);
	if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL))
		goto out;

	while (!crng_ready() && !signal_pending(current)) {
		/*
		 * Check !timer_pending() and then ensure that any previous callback has finished
		 * executing by checking timer_delete_sync_try(), before queueing the next one.
		 */
		if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
			unsigned int num_cpus;

			/*
			 * Preemption must be disabled here, both to read the current CPU number
			 * and to avoid scheduling a timer on a dead CPU.
			 */
			preempt_disable();

			/* Only schedule callbacks on timer CPUs that are online. */
			cpumask_and(timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
			num_cpus = cpumask_weight(timer_cpus);
			/* In very bizarre case of misconfiguration, fallback to all online. */
			if (unlikely(num_cpus == 0)) {
				*timer_cpus = *cpu_online_mask;
				num_cpus = cpumask_weight(timer_cpus);
			}

			/* Basic CPU round-robin, which avoids the current CPU. */
			do {
				cpu = cpumask_next(cpu, timer_cpus);
				if (cpu >= nr_cpu_ids)
					cpu = cpumask_first(timer_cpus);
			} while (cpu == smp_processor_id() && num_cpus > 1);

			/* Expiring the timer at `jiffies` means it's the next tick. */
			stack->timer.expires = jiffies;

			add_timer_on(&stack->timer, cpu);

			preempt_enable();
		}
		mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
		schedule();
		stack->entropy = random_get_entropy();
	}
	mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));

	free_cpumask_var(timer_cpus);
out:
	timer_delete_sync(&stack->timer);
	timer_destroy_on_stack(&stack->timer);
}


/**********************************************************************
 *
 * Userspace reader/writer interfaces.
 *
 * getrandom(2) is the primary modern interface into the RNG and should
 * be used in preference to anything else.
 *
 * Reading from /dev/random has the same functionality as calling
 * getrandom(2) with flags=0.
 * In earlier versions, however, it had vastly different semantics and
 * should therefore be avoided, to prevent backwards compatibility issues.
 *
 * Reading from /dev/urandom has the same functionality as calling
 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
 * waiting for the RNG to be ready, it should not be used.
 *
 * Writing to either /dev/random or /dev/urandom adds entropy to
 * the input pool but does not credit it.
 *
 * Polling on /dev/random indicates when the RNG is initialized, on
 * the read side, and when it wants new entropy, on the write side.
 *
 * Both /dev/random and /dev/urandom have the same set of ioctls for
 * adding entropy, getting the entropy count, zeroing the count, and
 * reseeding the crng.
 *
 **********************************************************************/

SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
	struct iov_iter iter;
	int ret;

	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (!crng_ready() && !(flags & GRND_INSECURE)) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}

	ret = import_ubuf(ITER_DEST, ubuf, len, &iter);
	if (unlikely(ret))
		return ret;
	return get_random_bytes_user(&iter);
}

static __poll_t random_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &crng_init_wait, wait);
	return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}

static ssize_t write_pool_user(struct iov_iter *iter)
{
	u8 block[BLAKE2S_BLOCK_SIZE];
	ssize_t ret = 0;
	size_t copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	for (;;) {
		copied = copy_from_iter(block, sizeof(block), iter);
		ret += copied;
		mix_pool_bytes(block, copied);
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
	return ret ? ret : -EFAULT;
}

static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	return write_pool_user(iter);
}

static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	static int maxwarn = 10;

	/*
	 * Opportunistically attempt to initialize the RNG on platforms that
	 * have fast cycle counters, but don't (for now) require it to succeed.
	 */
	if (!crng_ready())
		try_to_generate_entropy();

	if (!crng_ready()) {
		if (!ratelimit_disable && maxwarn <= 0)
			ratelimit_state_inc_miss(&urandom_warning);
		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
			--maxwarn;
			pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
				  current->comm, iov_iter_count(iter));
		}
	}

	return get_random_bytes_user(iter);
}

static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	int ret;

	if (!crng_ready() &&
	    ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
	     (kiocb->ki_filp->f_flags & O_NONBLOCK)))
		return -EAGAIN;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return get_random_bytes_user(iter);
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;
	int ent_count;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking. */
		if (put_user(input_pool.init_bits, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_init_bits(ent_count);
		return 0;
	case RNDADDENTROPY: {
		struct iov_iter iter;
		ssize_t ret;
		int len;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(len, p++))
			return -EFAULT;
		ret = import_ubuf(ITER_SOURCE, p, len, &iter);
		if (unlikely(ret))
			return ret;
		ret = write_pool_user(&iter);
		if (unlikely(ret < 0))
			return ret;
		/* Since we're crediting, enforce that it was all written into the pool. */
		if (unlikely(ret != len))
			return -EFAULT;
		credit_init_bits(ent_count);
		return 0;
	}
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/* No longer has any effect. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (!crng_ready())
			return -ENODATA;
		crng_reseed(NULL);
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read_iter = random_read_iter,
	.write_iter = random_write_iter,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
};

const struct file_operations urandom_fops = {
	.read_iter = urandom_read_iter,
	.write_iter = random_write_iter,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
};


/********************************************************************
 *
 * Sysctl interface.
 *
 * These are partly unused legacy knobs with dummy values to not break
 * userspace and partly still useful things.
 * They are usually accessible in /proc/sys/kernel/random/ and are as
 * follows:
 *
 * - boot_id - a UUID representing the current boot.
 *
 * - uuid - a random UUID, different each time the file is read.
 *
 * - poolsize - the number of bits of entropy that the input pool can
 *   hold, tied to the POOL_BITS constant.
 *
 * - entropy_avail - the number of bits of entropy currently in the
 *   input pool. Always <= poolsize.
 *
 * - write_wakeup_threshold - the amount of entropy in the input pool
 *   below which write polls to /dev/random will unblock, requesting
 *   more entropy, tied to the POOL_READY_BITS constant. It is writable
 *   to avoid breaking old userspaces, but writing to it does not
 *   change any behavior of the RNG.
 *
 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
 *   It is writable to avoid breaking old userspaces, but writing
 *   to it does not change any behavior of the RNG.
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 */
static int proc_do_uuid(const struct ctl_table *table, int write, void *buf,
			size_t *lenp, loff_t *ppos)
{
	u8 tmp_uuid[UUID_SIZE], *uuid;
	char uuid_string[UUID_STRING_LEN + 1];
	struct ctl_table fake_table = {
		.data = uuid_string,
		.maxlen = UUID_STRING_LEN
	};

	if (write)
		return -EPERM;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
	return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}

/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
			    size_t *lenp, loff_t *ppos)
{
	return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}

static const struct ctl_table random_table[] = {
	{
		.procname = "poolsize",
		.data = &sysctl_poolsize,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "entropy_avail",
		.data = &input_pool.init_bits,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "write_wakeup_threshold",
		.data = &sysctl_random_write_wakeup_bits,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "urandom_min_reseed_secs",
		.data = &sysctl_random_min_urandom_seed,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "boot_id",
		.data = &sysctl_bootid,
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{
		.procname = "uuid",
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
};

/*
 * random_init() is called before sysctl_init(),
 * so we cannot call register_sysctl_init() in random_init()
 */
static int __init random_sysctls_init(void)
{
	register_sysctl_init("kernel/random", random_table);
	return 0;
}
device_initcall(random_sysctls_init);
#endif