// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
 *
 * This driver produces cryptographically secure pseudorandom data. It is divided
 * into roughly six sections, each with a section header:
 *
 *   - Initialization and readiness waiting.
 *   - Fast key erasure RNG, the "crng".
 *   - Entropy accumulation and extraction routines.
 *   - Entropy collection routines.
 *   - Userspace reader/writer interfaces.
 *   - Sysctl interface.
 *
 * The high level overview is that there is one input pool, into which
 * various pieces of data are hashed. Some of that data is then "credited" as
 * having a certain number of bits of entropy. When enough bits of entropy are
 * available, the hash is finalized and handed as a key to a stream cipher that
 * expands it indefinitely for various consumers. This key is periodically
 * refreshed as the various entropy collectors, described below, add data to the
 * input pool and credit it. There is currently no Fortuna-like scheduler
 * involved, which can lead to malicious entropy sources causing a premature
 * reseed, and the entropy estimates are, at best, conservative guesses.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/

/*
 * crng_init = 0 --> Uninitialized
 *             1 --> Initialized
 *             2 --> Initialized from input_pool
 *
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
/* Various types of waiters for crng_init->2 transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_chain_lock);
static RAW_NOTIFIER_HEAD(random_ready_chain);

/*
 * Control how we warn userspace.
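 * (unseeded_warning rate-limits warnings about in-kernel use of the RNG
 * before it is ready, and urandom_warning rate-limits warnings about reads
 * of /dev/urandom before it is ready; each allows 3 messages per second.)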
 */
static struct ratelimit_state unseeded_warning =
        RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
        RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
static int ratelimit_disable __read_mostly;
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
        return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);

/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
        while (!crng_ready()) {
                int ret;

                try_to_generate_entropy();
                ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
                if (ret)
                        return ret > 0 ? 0 : ret;
        }
        return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

/*
 * Add a callback function that will be invoked when the input
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *          -EALREADY if pool is already initialised (callback not called)
 */
int register_random_ready_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret = -EALREADY;

        if (crng_ready())
                return ret;

        spin_lock_irqsave(&random_ready_chain_lock, flags);
        if (!crng_ready())
                ret = raw_notifier_chain_register(&random_ready_chain, nb);
        spin_unlock_irqrestore(&random_ready_chain_lock, flags);
        return ret;
}

/*
 * Delete a previously registered readiness callback function.
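 *
 * Returns: 0 if the callback was found and removed, or -ENOENT if it
 * was not registered on the chain.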
 */
int unregister_random_ready_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&random_ready_chain_lock, flags);
        ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
        spin_unlock_irqrestore(&random_ready_chain_lock, flags);
        return ret;
}

static void process_random_ready_list(void)
{
        unsigned long flags;

        spin_lock_irqsave(&random_ready_chain_lock, flags);
        raw_notifier_call_chain(&random_ready_chain, 0, NULL);
        spin_unlock_irqrestore(&random_ready_chain_lock, flags);
}

#define warn_unseeded_randomness(previous) \
        _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
        const bool print_once = false;
#else
        static bool print_once __read_mostly;
#endif

        if (print_once || crng_ready() ||
            (previous && (caller == READ_ONCE(*previous))))
                return;
        WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
        print_once = true;
#endif
        if (__ratelimit(&unseeded_warning))
                printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
                                func_name, caller, crng_init);
}


/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *      void get_random_bytes(void *buf, size_t nbytes)
 *      u32 get_random_u32()
 *      u64 get_random_u64()
 *      unsigned int get_random_int()
 *      unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The u32, u64, int, and long family of
 * functions may be higher performance for one-off random integers,
 * because they do a bit of buffering and do not invoke reseeding
 * until the buffer is emptied.
 *
 *********************************************************************/

enum {
        CRNG_RESEED_INTERVAL = 300 * HZ,
        CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
};

static struct {
        u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
        unsigned long birth;
        unsigned long generation;
        spinlock_t lock;
} base_crng = {
        .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
        u8 key[CHACHA_KEY_SIZE];
        unsigned long generation;
        local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
        .generation = ULONG_MAX,
        .lock = INIT_LOCAL_LOCK(crngs.lock),
};

/* Used by crng_reseed() to extract a new seed from the input pool. */
static bool drain_entropy(void *buf, size_t nbytes, bool force);

/*
 * This extracts a new crng key from the input pool, but only if there is a
 * sufficient amount of entropy available or force is true, in order to
 * mitigate bruteforcing of newly added bits.
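 * (If reseeds happened after only a few new bits had been credited, an
 * attacker who had compromised the previous key could brute-force the
 * difference; gating the reseed on a full pool of credit avoids that.)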
 */
static void crng_reseed(bool force)
{
        unsigned long flags;
        unsigned long next_gen;
        u8 key[CHACHA_KEY_SIZE];
        bool finalize_init = false;

        /* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
        if (!drain_entropy(key, sizeof(key), force))
                return;

        /*
         * We copy the new key into the base_crng, overwriting the old one,
         * and update the generation counter. We avoid hitting ULONG_MAX,
         * because the per-cpu crngs are initialized to ULONG_MAX, so this
         * forces new CPUs that come online to always initialize.
         */
        spin_lock_irqsave(&base_crng.lock, flags);
        memcpy(base_crng.key, key, sizeof(base_crng.key));
        next_gen = base_crng.generation + 1;
        if (next_gen == ULONG_MAX)
                ++next_gen;
        WRITE_ONCE(base_crng.generation, next_gen);
        WRITE_ONCE(base_crng.birth, jiffies);
        if (!crng_ready()) {
                crng_init = 2;
                finalize_init = true;
        }
        spin_unlock_irqrestore(&base_crng.lock, flags);
        memzero_explicit(key, sizeof(key));
        if (finalize_init) {
                process_random_ready_list();
                wake_up_interruptible(&crng_init_wait);
                kill_fasync(&fasync, SIGIO, POLL_IN);
                pr_notice("crng init done\n");
                if (unseeded_warning.missed) {
                        pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
                                  unseeded_warning.missed);
                        unseeded_warning.missed = 0;
                }
                if (urandom_warning.missed) {
                        pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
                                  urandom_warning.missed);
                        urandom_warning.missed = 0;
                }
        }
}

/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 *
 * The returned ChaCha state contains within it a copy of the old
 * key value, at index 4, so the state should always be zeroed out
 * immediately after using in order to maintain forward secrecy.
 * If the state cannot be erased in a timely manner, then it is
 * safer to set the random_data parameter to &chacha_state[4] so
 * that this function overwrites it before returning.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
                                  u32 chacha_state[CHACHA_STATE_WORDS],
                                  u8 *random_data, size_t random_data_len)
{
        u8 first_block[CHACHA_BLOCK_SIZE];

        BUG_ON(random_data_len > 32);

        chacha_init_consts(chacha_state);
        memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
        memset(&chacha_state[12], 0, sizeof(u32) * 4);
        chacha20_block(chacha_state, first_block);

        memcpy(key, first_block, CHACHA_KEY_SIZE);
        memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
        memzero_explicit(first_block, sizeof(first_block));
}

/*
 * Return whether the crng seed is considered to be sufficiently
 * old that a reseeding might be attempted. This happens if the last
 * reseeding was CRNG_RESEED_INTERVAL ago, or during early boot, at
 * an interval proportional to the uptime.
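 * For example, at 60 seconds of uptime the reseed interval is 30 seconds
 * (uptime / 2), clamped below at 5 seconds; the early boot behavior ends
 * once uptime reaches 2 * CRNG_RESEED_INTERVAL / HZ = 600 seconds.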
 */
static bool crng_has_old_seed(void)
{
        static bool early_boot = true;
        unsigned long interval = CRNG_RESEED_INTERVAL;

        if (unlikely(READ_ONCE(early_boot))) {
                time64_t uptime = ktime_get_seconds();
                if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
                        WRITE_ONCE(early_boot, false);
                else
                        interval = max_t(unsigned int, 5 * HZ,
                                         (unsigned int)uptime / 2 * HZ);
        }
        return time_after(jiffies, READ_ONCE(base_crng.birth) + interval);
}

/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
                            u8 *random_data, size_t random_data_len)
{
        unsigned long flags;
        struct crng *crng;

        BUG_ON(random_data_len > 32);

        /*
         * For the fast path, we check whether we're ready, unlocked first, and
         * then re-check once locked later. In the case where we're really not
         * ready, we do fast key erasure with the base_crng directly, because
         * this is what crng_pre_init_inject() mutates during early init.
         */
        if (!crng_ready()) {
                bool ready;

                spin_lock_irqsave(&base_crng.lock, flags);
                ready = crng_ready();
                if (!ready)
                        crng_fast_key_erasure(base_crng.key, chacha_state,
                                              random_data, random_data_len);
                spin_unlock_irqrestore(&base_crng.lock, flags);
                if (!ready)
                        return;
        }

        /*
         * If the base_crng is old enough, we try to reseed, which in turn
         * bumps the generation counter that we check below.
         */
        if (unlikely(crng_has_old_seed()))
                crng_reseed(false);

        local_lock_irqsave(&crngs.lock, flags);
        crng = raw_cpu_ptr(&crngs);

        /*
         * If our per-cpu crng is older than the base_crng, then it means
         * somebody reseeded the base_crng. In that case, we do fast key
         * erasure on the base_crng, and use its output as the new key
         * for our per-cpu crng. This brings us up to date with base_crng.
         */
        if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
                spin_lock(&base_crng.lock);
                crng_fast_key_erasure(base_crng.key, chacha_state,
                                      crng->key, sizeof(crng->key));
                crng->generation = base_crng.generation;
                spin_unlock(&base_crng.lock);
        }

        /*
         * Finally, when we've made it this far, our per-cpu crng has an up
         * to date key, and we can do fast key erasure with it to produce
         * some random data and a ChaCha state for the caller. All other
         * branches of this function are "unlikely", so most of the time we
         * should wind up here immediately.
         */
        crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
        local_unlock_irqrestore(&crngs.lock, flags);
}

/*
 * This function is for crng_init == 0 only. It loads entropy directly
 * into the crng's key, without going through the input pool. It is,
 * generally speaking, not very safe, but we use this only at early
 * boot time when it's better to have something there rather than
 * nothing.
 *
 * If account is set, then the crng_init_cnt counter is incremented.
 * This shouldn't be set by functions like add_device_randomness(),
 * where we can't trust the buffer passed to it is guaranteed to be
 * unpredictable (so it might not have any entropy at all).
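 * Once CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE = 64) accounted bytes
 * have been mixed in, crng_init moves from 0 to 1 and "fast init" is done.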
 */
static void crng_pre_init_inject(const void *input, size_t len, bool account)
{
        static int crng_init_cnt = 0;
        struct blake2s_state hash;
        unsigned long flags;

        blake2s_init(&hash, sizeof(base_crng.key));

        spin_lock_irqsave(&base_crng.lock, flags);
        if (crng_init != 0) {
                spin_unlock_irqrestore(&base_crng.lock, flags);
                return;
        }

        blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
        blake2s_update(&hash, input, len);
        blake2s_final(&hash, base_crng.key);

        if (account) {
                crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
                if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                        ++base_crng.generation;
                        crng_init = 1;
                }
        }

        spin_unlock_irqrestore(&base_crng.lock, flags);

        if (crng_init == 1)
                pr_notice("fast init done\n");
}

static void _get_random_bytes(void *buf, size_t nbytes)
{
        u32 chacha_state[CHACHA_STATE_WORDS];
        u8 tmp[CHACHA_BLOCK_SIZE];
        size_t len;

        if (!nbytes)
                return;

        len = min_t(size_t, 32, nbytes);
        crng_make_state(chacha_state, buf, len);
        nbytes -= len;
        buf += len;

        while (nbytes) {
                if (nbytes < CHACHA_BLOCK_SIZE) {
                        chacha20_block(chacha_state, tmp);
                        memcpy(buf, tmp, nbytes);
                        memzero_explicit(tmp, sizeof(tmp));
                        break;
                }

                chacha20_block(chacha_state, buf);
                if (unlikely(chacha_state[12] == 0))
                        ++chacha_state[13];
                nbytes -= CHACHA_BLOCK_SIZE;
                buf += CHACHA_BLOCK_SIZE;
        }

        memzero_explicit(chacha_state, sizeof(chacha_state));
}

/*
 * This function is the exported kernel interface. It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc. It does not rely on the hardware random
 * number generator. For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, size_t nbytes)
{
        static void *previous;

        warn_unseeded_randomness(&previous);
        _get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);

static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
        size_t len, left, ret = 0;
        u32 chacha_state[CHACHA_STATE_WORDS];
        u8 output[CHACHA_BLOCK_SIZE];

        if (!nbytes)
                return 0;

        /*
         * Immediately overwrite the ChaCha key at index 4 with random
         * bytes, in case userspace causes copy_to_user() below to sleep
         * forever, so that we still retain forward secrecy in that case.
         */
        crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
        /*
         * However, if we're doing a read of len <= 32, we don't need to
         * use chacha_state after, so we can simply return those bytes to
         * the user directly.
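         * (Those bytes are fast key erasure output that crng_make_state()
         * wrote over the old key words, so they are pure output, not key
         * material.)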
         */
        if (nbytes <= CHACHA_KEY_SIZE) {
                ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
                goto out_zero_chacha;
        }

        for (;;) {
                chacha20_block(chacha_state, output);
                if (unlikely(chacha_state[12] == 0))
                        ++chacha_state[13];

                len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
                left = copy_to_user(buf, output, len);
                if (left) {
                        ret += len - left;
                        break;
                }

                buf += len;
                ret += len;
                nbytes -= len;
                if (!nbytes)
                        break;

                BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
                if (ret % PAGE_SIZE == 0) {
                        if (signal_pending(current))
                                break;
                        cond_resched();
                }
        }

        memzero_explicit(output, sizeof(output));
out_zero_chacha:
        memzero_explicit(chacha_state, sizeof(chacha_state));
        return ret ? ret : -EFAULT;
}

/*
 * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the
 * randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
struct batched_entropy {
        union {
                /*
                 * We make this 1.5x a ChaCha block, so that we get the
                 * remaining 32 bytes from fast key erasure, plus one full
                 * block from the detached ChaCha state. We can increase
                 * the size of this later if needed so long as we keep the
                 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
                 */
                u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
                u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
        };
        local_lock_t lock;
        unsigned long generation;
        unsigned int position;
};

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
        .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
        .position = UINT_MAX
};

u64 get_random_u64(void)
{
        u64 ret;
        unsigned long flags;
        struct batched_entropy *batch;
        static void *previous;
        unsigned long next_gen;

        warn_unseeded_randomness(&previous);

        local_lock_irqsave(&batched_entropy_u64.lock, flags);
        batch = raw_cpu_ptr(&batched_entropy_u64);

        next_gen = READ_ONCE(base_crng.generation);
        if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
            next_gen != batch->generation) {
                _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
                batch->position = 0;
                batch->generation = next_gen;
        }

        ret = batch->entropy_u64[batch->position];
        batch->entropy_u64[batch->position] = 0;
        ++batch->position;
        local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
        return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
        .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
        .position = UINT_MAX
};

u32 get_random_u32(void)
{
        u32 ret;
        unsigned long flags;
        struct batched_entropy *batch;
        static void *previous;
        unsigned long next_gen;

        warn_unseeded_randomness(&previous);

        local_lock_irqsave(&batched_entropy_u32.lock, flags);
        batch = raw_cpu_ptr(&batched_entropy_u32);

        next_gen = READ_ONCE(base_crng.generation);
        if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
            next_gen != batch->generation) {
                _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
                batch->position = 0;
                batch->generation = next_gen;
        }

        ret = batch->entropy_u32[batch->position];
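        /* Erase each word as it is consumed, so the batch retains no past output. */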
        batch->entropy_u32[batch->position] = 0;
        ++batch->position;
        local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
        return ret;
}
EXPORT_SYMBOL(get_random_u32);

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int random_prepare_cpu(unsigned int cpu)
{
        /*
         * When the cpu comes back online, immediately invalidate both
         * the per-cpu crng and all batches, so that we serve fresh
         * randomness.
         */
        per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
        per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
        per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
        return 0;
}
#endif

/**
 * randomize_page - Generate a random, page aligned address
 * @start:      The smallest acceptable address the caller will take.
 * @range:      The size of the area, starting at @start, within which the
 *              random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
        if (!PAGE_ALIGNED(start)) {
                range -= PAGE_ALIGN(start) - start;
                start = PAGE_ALIGN(start);
        }

        if (start > ULONG_MAX - range)
                range = ULONG_MAX - start;

        range >>= PAGE_SHIFT;

        if (range == 0)
                return start;

        return start + (get_random_long() % range << PAGE_SHIFT);
}

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */
size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
{
        size_t left = nbytes;
        u8 *p = buf;

        while (left) {
                unsigned long v;
                size_t chunk = min_t(size_t, left, sizeof(unsigned long));

                if (!arch_get_random_long(&v))
                        break;

                memcpy(p, &v, chunk);
                p += chunk;
                left -= chunk;
        }

        return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);


/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *     static void mix_pool_bytes(const void *in, size_t nbytes)
 *
 * After which, if added entropy should be credited:
 *
 *     static void credit_entropy_bits(size_t nbits)
 *
 * Finally, extract entropy via these two, with the latter one
 * setting the entropy count to zero and extracting only if there
 * are POOL_MIN_BITS of entropy credited prior or force is true:
 *
 *     static void extract_entropy(void *buf, size_t nbytes)
 *     static bool drain_entropy(void *buf, size_t nbytes, bool force)
 *
 **********************************************************************/

enum {
        POOL_BITS = BLAKE2S_HASH_SIZE * 8,
        POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
};

/*
 * For notifying userspace that it should write entropy into /dev/random.
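 * Writers polling /dev/random see EPOLLOUT once entropy_count falls below
 * POOL_MIN_BITS (see random_poll()), and drain_entropy() wakes this queue
 * whenever it zeroes the count.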
 */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);

static struct {
        struct blake2s_state hash;
        spinlock_t lock;
        unsigned int entropy_count;
} input_pool = {
        .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
                    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
                    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
        .hash.outlen = BLAKE2S_HASH_SIZE,
        .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void _mix_pool_bytes(const void *in, size_t nbytes)
{
        blake2s_update(&input_pool.hash, in, nbytes);
}

/*
 * This function adds bytes into the entropy "pool". It does not
 * update the entropy estimate. The caller should call
 * credit_entropy_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *in, size_t nbytes)
{
        unsigned long flags;

        spin_lock_irqsave(&input_pool.lock, flags);
        _mix_pool_bytes(in, nbytes);
        spin_unlock_irqrestore(&input_pool.lock, flags);
}

static void credit_entropy_bits(size_t nbits)
{
        unsigned int entropy_count, orig, add;

        if (!nbits)
                return;

        add = min_t(size_t, nbits, POOL_BITS);

        do {
                orig = READ_ONCE(input_pool.entropy_count);
                entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
        } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);

        if (!crng_ready() && entropy_count >= POOL_MIN_BITS)
                crng_reseed(false);
}

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t nbytes)
{
        unsigned long flags;
        u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
        struct {
                unsigned long rdseed[32 / sizeof(long)];
                size_t counter;
        } block;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
                if (!arch_get_random_seed_long(&block.rdseed[i]) &&
                    !arch_get_random_long(&block.rdseed[i]))
                        block.rdseed[i] = random_get_entropy();
        }

        spin_lock_irqsave(&input_pool.lock, flags);

        /* seed = HASHPRF(last_key, entropy_input) */
        blake2s_final(&input_pool.hash, seed);

        /* next_key = HASHPRF(seed, RDSEED || 0) */
        block.counter = 0;
        blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
        blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

        spin_unlock_irqrestore(&input_pool.lock, flags);
        memzero_explicit(next_key, sizeof(next_key));

        while (nbytes) {
                i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
                /* output = HASHPRF(seed, RDSEED || ++counter) */
                ++block.counter;
                blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
                nbytes -= i;
                buf += i;
        }

        memzero_explicit(seed, sizeof(seed));
        memzero_explicit(&block, sizeof(block));
}

/*
 * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force
 * is true, and then we set the entropy count to zero (but don't actually touch
 * any data). Only then can we extract a new key with extract_entropy().
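 * The cmpxchg loop below makes concurrent callers race for the credited
 * bits: the winner zeroes the count and extracts; if the count changes
 * underneath a caller, the loop simply retries.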
 */
static bool drain_entropy(void *buf, size_t nbytes, bool force)
{
        unsigned int entropy_count;
        do {
                entropy_count = READ_ONCE(input_pool.entropy_count);
                if (!force && entropy_count < POOL_MIN_BITS)
                        return false;
        } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
        extract_entropy(buf, nbytes);
        wake_up_interruptible(&random_write_wait);
        kill_fasync(&fasync, SIGIO, POLL_OUT);
        return true;
}


/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *      void add_device_randomness(const void *buf, size_t size);
 *      void add_input_randomness(unsigned int type, unsigned int code,
 *                                unsigned int value);
 *      void add_disk_randomness(struct gendisk *disk);
 *      void add_hwgenerator_randomness(const void *buffer, size_t count,
 *                                      size_t entropy);
 *      void add_bootloader_randomness(const void *buf, size_t size);
 *      void add_vmfork_randomness(const void *unique_vm_id, size_t size);
 *      void add_interrupt_randomness(int irq);
 *
 * add_device_randomness() adds data to the input pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* credit any actual entropy to
 * the pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * The above two routines try to estimate how many bits of entropy
 * to credit. They do this by keeping track of the first and second
 * order deltas of the event timings.
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
 * add_device_randomness(), depending on whether or not the configuration
 * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
 * representing the current instance of a VM to the pool, without crediting,
 * and then force-reseeds the crng so that it takes effect immediately.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the input pool roughly once a second or after 64
 * interrupts, crediting 1 bit of entropy for whichever comes first.
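 * (That once-a-second-or-64-interrupts policy is enforced in
 * add_interrupt_randomness() below, which hands the per-cpu fast pool
 * off to mix_interrupt_randomness() on a workqueue.)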
 *
 **********************************************************************/

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
        return kstrtobool(arg, &trust_cpu);
}
static int __init parse_trust_bootloader(char *arg)
{
        return kstrtobool(arg, &trust_bootloader);
}
early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);

/*
 * The first collection of entropy occurs at system boot while interrupts
 * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
 * Depending on the above configuration knob, RDSEED may be considered
 * sufficient for initialization. Note that much earlier setup may already
 * have pushed entropy into the input pool by the time we get here.
 */
int __init rand_initialize(void)
{
        size_t i;
        ktime_t now = ktime_get_real();
        bool arch_init = true;
        unsigned long rv;

#if defined(LATENT_ENTROPY_PLUGIN)
        static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
        _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif

        for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
                if (!arch_get_random_seed_long_early(&rv) &&
                    !arch_get_random_long_early(&rv)) {
                        rv = random_get_entropy();
                        arch_init = false;
                }
                _mix_pool_bytes(&rv, sizeof(rv));
        }
        _mix_pool_bytes(&now, sizeof(now));
        _mix_pool_bytes(utsname(), sizeof(*(utsname())));

        extract_entropy(base_crng.key, sizeof(base_crng.key));
        ++base_crng.generation;

        if (arch_init && trust_cpu && !crng_ready()) {
                crng_init = 2;
                pr_notice("crng init done (trusting CPU's manufacturer)\n");
        }

        if (ratelimit_disable) {
                urandom_warning.interval = 0;
                unseeded_warning.interval = 0;
        }
        return 0;
}

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t size)
{
        unsigned long cycles = random_get_entropy();
        unsigned long flags, now = jiffies;

        if (crng_init == 0 && size)
                crng_pre_init_inject(buf, size, false);

        spin_lock_irqsave(&input_pool.lock, flags);
        _mix_pool_bytes(&cycles, sizeof(cycles));
        _mix_pool_bytes(&now, sizeof(now));
        _mix_pool_bytes(buf, size);
        spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

/* There is one of these per entropy source */
struct timer_rand_state {
        unsigned long last_time;
        long last_delta, last_delta2;
};

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened. This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
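 *
 * The credited amount is roughly the base-2 log of the smallest of the
 * first, second, and third order delta magnitudes, rounded down by one
 * bit and capped, so regularly spaced events earn close to nothing.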
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
        unsigned long cycles = random_get_entropy(), now = jiffies, flags;
        long delta, delta2, delta3;

        spin_lock_irqsave(&input_pool.lock, flags);
        _mix_pool_bytes(&cycles, sizeof(cycles));
        _mix_pool_bytes(&now, sizeof(now));
        _mix_pool_bytes(&num, sizeof(num));
        spin_unlock_irqrestore(&input_pool.lock, flags);

        /*
         * Calculate number of bits of randomness we probably added.
         * We take into account the first, second and third-order deltas
         * in order to make our estimate.
         */
        delta = now - READ_ONCE(state->last_time);
        WRITE_ONCE(state->last_time, now);

        delta2 = delta - READ_ONCE(state->last_delta);
        WRITE_ONCE(state->last_delta, delta);

        delta3 = delta2 - READ_ONCE(state->last_delta2);
        WRITE_ONCE(state->last_delta2, delta2);

        if (delta < 0)
                delta = -delta;
        if (delta2 < 0)
                delta2 = -delta2;
        if (delta3 < 0)
                delta3 = -delta3;
        if (delta > delta2)
                delta = delta2;
        if (delta > delta3)
                delta = delta3;

        /*
         * delta is now minimum absolute delta.
         * Round down by 1 bit on general principles,
         * and limit entropy estimate to 12 bits.
         */
        credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
                          unsigned int value)
{
        static unsigned char last_value;
        static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };

        /* Ignore autorepeat and the like. */
        if (value == last_value)
                return;

        last_value = value;
        add_timer_randomness(&input_timer_state,
                             (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
        if (!disk || !disk->random)
                return;
        /* First major is 1, so we get >= 0x200 here. */
        add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);

void rand_initialize_disk(struct gendisk *disk)
{
        struct timer_rand_state *state;

        /*
         * If kzalloc returns null, we just won't use that entropy
         * source.
         */
        state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
        if (state) {
                state->last_time = INITIAL_JIFFIES;
                disk->random = state;
        }
}
#endif

/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buffer, size_t count,
                                size_t entropy)
{
        if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
                crng_pre_init_inject(buffer, count, true);
                mix_pool_bytes(buffer, count);
                return;
        }

        /*
         * Throttle writing if we're above the trickle threshold.
         * We'll be woken up again once below POOL_MIN_BITS, when
         * the calling thread is about to terminate, or once
         * CRNG_RESEED_INTERVAL has elapsed.
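         * (drain_entropy() issues that wakeup whenever it zeroes the
         * entropy count, i.e. each time the crng takes a fresh seed.)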
         */
        wait_event_interruptible_timeout(random_write_wait,
                        !system_wq || kthread_should_stop() ||
                        input_pool.entropy_count < POOL_MIN_BITS,
                        CRNG_RESEED_INTERVAL);
        mix_pool_bytes(buffer, count);
        credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

/*
 * Handle a random seed passed in by the bootloader. If the seed is
 * trustworthy, it is credited as if it came from a hardware RNG;
 * otherwise it is treated as device data. The decision is controlled
 * by CONFIG_RANDOM_TRUST_BOOTLOADER (and the random.trust_bootloader
 * command line option above).
 */
void add_bootloader_randomness(const void *buf, size_t size)
{
        if (trust_bootloader)
                add_hwgenerator_randomness(buf, size, size * 8);
        else
                add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);

#if IS_ENABLED(CONFIG_VMGENID)
static BLOCKING_NOTIFIER_HEAD(vmfork_chain);

/*
 * Handle a new VM ID, which is unique but not secret, so we
 * don't credit it, but we do immediately force a reseed after so
 * that it's used by the crng posthaste.
 */
void add_vmfork_randomness(const void *unique_vm_id, size_t size)
{
        add_device_randomness(unique_vm_id, size);
        if (crng_ready()) {
                crng_reseed(true);
                pr_notice("crng reseeded due to virtual machine fork\n");
        }
        blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
}
#if IS_MODULE(CONFIG_VMGENID)
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif

int register_random_vmfork_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);

int unregister_random_vmfork_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif

struct fast_pool {
        struct work_struct mix;
        unsigned long pool[4];
        unsigned long last;
        unsigned int count;
        u16 reg_idx;
};

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
        /* SipHash constants */
        .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL,
                  0x6c7967656e657261UL, 0x7465646279746573UL }
#else
        /* HalfSipHash constants */
        .pool = { 0, 0, 0x6c796765U, 0x74656462U }
#endif
};

/*
 * This is [Half]SipHash-1-x, starting from an empty key. Because
 * the key is fixed, it assumes that its inputs are non-malicious,
 * and therefore this has no security on its own. s represents the
 * 128 or 256-bit SipHash state, while v represents a 128-bit input.
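 * Each input word is absorbed SipHash-style: it is XORed into s[3], one
 * ARX SipRound permutes the state, and it is then XORed into s[0].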
 */
static void fast_mix(unsigned long s[4], const unsigned long *v)
{
        size_t i;

        for (i = 0; i < 16 / sizeof(long); ++i) {
                s[3] ^= v[i];
#ifdef CONFIG_64BIT
                s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32);
                s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2];
                s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0];
                s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32);
#else
                s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16);
                s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2];
                s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0];
                s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16);
#endif
                s[0] ^= v[i];
        }
}

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU has just come online, with
 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
 */
int random_online_cpu(unsigned int cpu)
{
        /*
         * During CPU shutdown and before CPU onlining, add_interrupt_
         * randomness() may schedule mix_interrupt_randomness(), and
         * set the MIX_INFLIGHT flag. However, because the worker can
         * be scheduled on a different CPU during this period, that
         * flag will never be cleared. For that reason, we zero out
         * the flag here, which runs just after workqueues are onlined
         * for the CPU again. This also has the effect of setting the
         * irq randomness count to zero so that new accumulated irqs
         * are fresh.
         */
        per_cpu_ptr(&irq_randomness, cpu)->count = 0;
        return 0;
}
#endif

static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
{
        unsigned long *ptr = (unsigned long *)regs;
        unsigned int idx;

        if (regs == NULL)
                return 0;
        idx = READ_ONCE(f->reg_idx);
        if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
                idx = 0;
        ptr += idx++;
        WRITE_ONCE(f->reg_idx, idx);
        return *ptr;
}

static void mix_interrupt_randomness(struct work_struct *work)
{
        struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
        /*
         * The size of the copied stack pool is explicitly 16 bytes so that we
         * tax mix_pool_bytes()'s compression function the same amount on all
         * platforms. This means on 64-bit we copy half the pool into this,
         * while on 32-bit we copy all of it. The entropy is supposed to be
         * sufficiently dispersed between bits that in the sponge-like
         * half case, on average we don't wind up "losing" some.
         */
        u8 pool[16];

        /* Check to see if we're running on the wrong CPU due to hotplug. */
        local_irq_disable();
        if (fast_pool != this_cpu_ptr(&irq_randomness)) {
                local_irq_enable();
                return;
        }

        /*
         * Copy the pool to the stack so that the mixer always has a
         * consistent view, before we reenable irqs again.
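         * (Resetting count below also clears the MIX_INFLIGHT bit that
         * add_interrupt_randomness() set when it queued this work.)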
         */
        memcpy(pool, fast_pool->pool, sizeof(pool));
        fast_pool->count = 0;
        fast_pool->last = jiffies;
        local_irq_enable();

        if (unlikely(crng_init == 0)) {
                crng_pre_init_inject(pool, sizeof(pool), true);
                mix_pool_bytes(pool, sizeof(pool));
        } else {
                mix_pool_bytes(pool, sizeof(pool));
                credit_entropy_bits(1);
        }

        memzero_explicit(pool, sizeof(pool));
}

void add_interrupt_randomness(int irq)
{
        enum { MIX_INFLIGHT = 1U << 31 };
        unsigned long cycles = random_get_entropy(), now = jiffies;
        struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
        struct pt_regs *regs = get_irq_regs();
        unsigned int new_count;
        union {
                u32 u32[4];
                u64 u64[2];
                unsigned long longs[16 / sizeof(long)];
        } irq_data;

        if (cycles == 0)
                cycles = get_reg(fast_pool, regs);

        if (sizeof(unsigned long) == 8) {
                irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
                irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
        } else {
                irq_data.u32[0] = cycles ^ irq;
                irq_data.u32[1] = now;
                irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
                irq_data.u32[3] = get_reg(fast_pool, regs);
        }

        fast_mix(fast_pool->pool, irq_data.longs);
        new_count = ++fast_pool->count;

        if (new_count & MIX_INFLIGHT)
                return;

        if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) ||
                               unlikely(crng_init == 0)))
                return;

        if (unlikely(!fast_pool->mix.func))
                INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
        fast_pool->count |= MIX_INFLIGHT;
        queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
        credit_entropy_bits(1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise.
 */
static void try_to_generate_entropy(void)
{
        struct {
                unsigned long cycles;
                struct timer_list timer;
        } stack;

        stack.cycles = random_get_entropy();

        /* Slow counter - or none. Don't even bother */
        if (stack.cycles == random_get_entropy())
                return;

        timer_setup_on_stack(&stack.timer, entropy_timer, 0);
        while (!crng_ready() && !signal_pending(current)) {
                if (!timer_pending(&stack.timer))
                        mod_timer(&stack.timer, jiffies + 1);
                mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
                schedule();
                stack.cycles = random_get_entropy();
        }

        del_timer_sync(&stack.timer);
        destroy_timer_on_stack(&stack.timer);
        mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
}


/**********************************************************************
 *
 * Userspace reader/writer interfaces.
 *
 * getrandom(2) is the primary modern interface into the RNG and should
 * be used in preference to anything else.
 *
 * Reading from /dev/random has the same functionality as calling
 * getrandom(2) with flags=0. In earlier versions, however, it had
 * vastly different semantics and should therefore be avoided, to
 * prevent backwards compatibility issues.
 *
 * Reading from /dev/urandom has the same functionality as calling
 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
 * waiting for the RNG to be ready, it should not be used.
 *
 * Writing to either /dev/random or /dev/urandom adds entropy to
 * the input pool but does not credit it.
 *
 * Polling on /dev/random indicates when the RNG is initialized, on
 * the read side, and when it wants new entropy, on the write side.
 *
 * Both /dev/random and /dev/urandom have the same set of ioctls for
 * adding entropy, getting the entropy count, zeroing the count, and
 * reseeding the crng.
 *
 **********************************************************************/

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
                flags)
{
        if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
                return -EINVAL;

        /*
         * Requesting insecure and blocking randomness at the same time makes
         * no sense.
         */
        if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
                return -EINVAL;

        if (count > INT_MAX)
                count = INT_MAX;

        if (!(flags & GRND_INSECURE) && !crng_ready()) {
                int ret;

                if (flags & GRND_NONBLOCK)
                        return -EAGAIN;
                ret = wait_for_random_bytes();
                if (unlikely(ret))
                        return ret;
        }
        return get_random_bytes_user(buf, count);
}

static __poll_t random_poll(struct file *file, poll_table *wait)
{
        __poll_t mask;

        poll_wait(file, &crng_init_wait, wait);
        poll_wait(file, &random_write_wait, wait);
        mask = 0;
        if (crng_ready())
                mask |= EPOLLIN | EPOLLRDNORM;
        if (input_pool.entropy_count < POOL_MIN_BITS)
                mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
}

static int write_pool(const char __user *ubuf, size_t count)
{
        size_t len;
        int ret = 0;
        u8 block[BLAKE2S_BLOCK_SIZE];

        while (count) {
                len = min(count, sizeof(block));
                if (copy_from_user(block, ubuf, len)) {
                        ret = -EFAULT;
                        goto out;
                }
                count -= len;
                ubuf += len;
                mix_pool_bytes(block, len);
                cond_resched();
        }

out:
        memzero_explicit(block, sizeof(block));
        return ret;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        int ret;

        ret = write_pool(buffer, count);
        if (ret)
                return ret;

        return (ssize_t)count;
}

static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
                            loff_t *ppos)
{
        static int maxwarn = 10;

        /*
         * Opportunistically attempt to initialize the RNG on platforms that
         * have fast cycle counters, but don't (for now) require it to succeed.
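         * (try_to_generate_entropy() bails out immediately when the cycle
         * counter is too coarse to observe jitter between two reads.)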
         */
        if (!crng_ready())
                try_to_generate_entropy();

        if (!crng_ready() && maxwarn > 0) {
                maxwarn--;
                if (__ratelimit(&urandom_warning))
                        pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
                                  current->comm, nbytes);
        }

        return get_random_bytes_user(buf, nbytes);
}

static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
                           loff_t *ppos)
{
        int ret;

        ret = wait_for_random_bytes();
        if (ret != 0)
                return ret;
        return get_random_bytes_user(buf, nbytes);
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
        int size, ent_count;
        int __user *p = (int __user *)arg;
        int retval;

        switch (cmd) {
        case RNDGETENTCNT:
                /* Inherently racy, no point locking. */
                if (put_user(input_pool.entropy_count, p))
                        return -EFAULT;
                return 0;
        case RNDADDTOENTCNT:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (get_user(ent_count, p))
                        return -EFAULT;
                if (ent_count < 0)
                        return -EINVAL;
                credit_entropy_bits(ent_count);
                return 0;
        case RNDADDENTROPY:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (get_user(ent_count, p++))
                        return -EFAULT;
                if (ent_count < 0)
                        return -EINVAL;
                if (get_user(size, p++))
                        return -EFAULT;
                retval = write_pool((const char __user *)p, size);
                if (retval < 0)
                        return retval;
                credit_entropy_bits(ent_count);
                return 0;
        case RNDZAPENTCNT:
        case RNDCLEARPOOL:
                /*
                 * Clear the entropy pool counters. We no longer clear
                 * the entropy pool, as that's silly.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) {
                        wake_up_interruptible(&random_write_wait);
                        kill_fasync(&fasync, SIGIO, POLL_OUT);
                }
                return 0;
        case RNDRESEEDCRNG:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (!crng_ready())
                        return -ENODATA;
                crng_reseed(false);
                return 0;
        default:
                return -EINVAL;
        }
}

static int random_fasync(int fd, struct file *filp, int on)
{
        return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
        .read = random_read,
        .write = random_write,
        .poll = random_poll,
        .unlocked_ioctl = random_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
        .read = urandom_read,
        .write = random_write,
        .unlocked_ioctl = random_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
};


/********************************************************************
 *
 * Sysctl interface.
 *
 * These are partly unused legacy knobs with dummy values to not break
 * userspace and partly still useful things. They are usually accessible
 * in /proc/sys/kernel/random/ and are as follows:
 *
 * - boot_id - a UUID representing the current boot.
 *
 * - uuid - a random UUID, different each time the file is read.
 *
 * - poolsize - the number of bits of entropy that the input pool can
 *   hold, tied to the POOL_BITS constant.
 *
 * - entropy_avail - the number of bits of entropy currently in the
 *   input pool. Always <= poolsize.
 *
 * - write_wakeup_threshold - the amount of entropy in the input pool
 *   below which write polls to /dev/random will unblock, requesting
 *   more entropy, tied to the POOL_MIN_BITS constant. It is writable
 *   to avoid breaking old userspaces, but writing to it does not
 *   change any behavior of the RNG.
 *
 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
 *   It is writable to avoid breaking old userspaces, but writing
 *   to it does not change any behavior of the RNG.
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
                        size_t *lenp, loff_t *ppos)
{
        u8 tmp_uuid[UUID_SIZE], *uuid;
        char uuid_string[UUID_STRING_LEN + 1];
        struct ctl_table fake_table = {
                .data = uuid_string,
                .maxlen = UUID_STRING_LEN
        };

        if (write)
                return -EPERM;

        uuid = table->data;
        if (!uuid) {
                uuid = tmp_uuid;
                generate_random_uuid(uuid);
        } else {
                static DEFINE_SPINLOCK(bootid_spinlock);

                spin_lock(&bootid_spinlock);
                if (!uuid[8])
                        generate_random_uuid(uuid);
                spin_unlock(&bootid_spinlock);
        }

        snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
        return proc_dostring(&fake_table, 0, buffer, lenp, ppos);
}

/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer,
                            size_t *lenp, loff_t *ppos)
{
        return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos);
}

static struct ctl_table random_table[] = {
        {
                .procname       = "poolsize",
                .data           = &sysctl_poolsize,
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "entropy_avail",
                .data           = &input_pool.entropy_count,
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "write_wakeup_threshold",
                .data           = &sysctl_random_write_wakeup_bits,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_rointvec,
        },
        {
                .procname       = "urandom_min_reseed_secs",
                .data           = &sysctl_random_min_urandom_seed,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_rointvec,
        },
        {
                .procname       = "boot_id",
                .data           = &sysctl_bootid,
                .mode           = 0444,
                .proc_handler   = proc_do_uuid,
        },
        {
                .procname       = "uuid",
                .mode           = 0444,
                .proc_handler   = proc_do_uuid,
        },
        { }
};

/*
 * rand_initialize() is called before sysctl_init(),
 * so we cannot call register_sysctl_init() in rand_initialize().
 */
static int __init random_sysctls_init(void)
{
        register_sysctl_init("kernel/random", random_table);
        return 0;
}
device_initcall(random_sysctls_init);
#endif