/*
 * random.c -- A strong random number generator
 *
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 *
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
 * rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Exported interfaces ---- output
 * ===============================
 *
 * There are four exported interfaces; two for use within the kernel,
 * and two for use from userspace.
 *
 * Exported interfaces ---- userspace output
 * -----------------------------------------
 *
 * The userspace interfaces are two character devices /dev/random and
 * /dev/urandom.  /dev/random is suitable for use when very high
 * quality randomness is desired (for example, for key generation or
 * one-time pads), as it will only return a maximum of the number of
 * bits of randomness (as estimated by the random number generator)
 * contained in the entropy pool.
 *
 * The /dev/urandom device does not have this limit, and will return
 * as many bytes as are requested.  As more and more random bytes are
 * requested without giving time for the entropy pool to recharge,
 * this will result in random numbers that are merely cryptographically
 * strong.  For many applications, however, this is acceptable.
 *
 * Exported interfaces ---- kernel output
 * --------------------------------------
 *
 * The primary kernel interface is
 *
 *	void get_random_bytes(void *buf, int nbytes);
 *
 * This interface will return the requested number of random bytes
 * and place them in the requested buffer.  This is equivalent to a
 * read from /dev/urandom.
 *
 * For less critical applications, there are the functions:
 *
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These are produced by a cryptographic RNG seeded from get_random_bytes,
 * and so do not deplete the entropy pool as much.  These are recommended
 * for most in-kernel operations *if the result is going to be stored in
 * the kernel*.
 *
 * Specifically, the get_random_int() family does not attempt to do
 * "anti-backtracking".  If you capture the state of the kernel (e.g.
 * by snapshotting the VM), you can figure out previous get_random_int()
 * return values.  But if the value is stored in the kernel anyway,
 * this is not a problem.
 *
 * It *is* safe to expose get_random_int() output to attackers (e.g. as
 * network cookies); given outputs 1..n, it's not feasible to predict
 * outputs 0 or n+1.  The only concern is an attacker who breaks into
 * the kernel later; the get_random_int() engine is not reseeded as
 * often as the get_random_bytes() one.
 *
 * get_random_bytes() is needed for keys that need to stay secret after
 * they are erased from the kernel.  For example, any key that will
 * be wrapped and stored encrypted.  And session encryption keys: we'd
 * like to know that after the session is closed and the keys erased,
 * the plaintext is unrecoverable to someone who recorded the ciphertext.
 *
 * But for network ports/cookies, stack canaries, PRNG seeds, address
 * space layout randomization, session *authentication* keys, or other
 * applications where the sensitive data is stored in the kernel in
 * plaintext for as long as it's sensitive, the get_random_int() family
 * is just fine.
 *
 * Consider ASLR.  We want to keep the address space secret from an
 * outside attacker while the process is running, but once the address
 * space is torn down, it's of no use to an attacker any more.  And it's
 * stored in kernel data structures as long as it's alive, so worrying
 * about an attacker's ability to extrapolate it from the get_random_int()
 * CRNG is silly.
 *
 * Even some cryptographic keys are safe to generate with get_random_int().
 * In particular, keys for SipHash are generally fine.  Here, knowledge
 * of the key authorizes you to do something to a kernel object (inject
 * packets to a network connection, or flood a hash table), and the
 * key is stored with the object being protected.  Once it goes away,
 * we no longer care if anyone knows the key.
 *
 * prandom_u32()
 * -------------
 *
 * For even weaker applications, see the pseudorandom generator
 * prandom_u32(), prandom_max(), and prandom_bytes().  If the random
 * numbers aren't security-critical at all, these are *far* cheaper.
 * Useful for self-tests, random error simulation, randomized backoffs,
 * and any other application where you trust that nobody is trying to
 * maliciously mess with you by guessing the "random" numbers.
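 *
 * As a rough usage sketch (hypothetical helper names, not code from this
 * file): a long-lived secret should come from get_random_bytes(), while a
 * non-secret value such as a randomized backoff can use the cheaper
 * get_random_u32():
 *
 *	#include <linux/random.h>
 *
 *	static void example_generate_key(u8 key[32])
 *	{
 *		get_random_bytes(key, 32);
 *	}
 *
 *	static u32 example_pick_jitter(void)
 *	{
 *		return get_random_u32();
 *	}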
 *
 * Exported interfaces ---- input
 * ==============================
 *
 * The current exported interfaces for gathering environmental noise
 * from the devices are:
 *
 *	void add_device_randomness(const void *buf, unsigned int size);
 *	void add_input_randomness(unsigned int type, unsigned int code,
 *				  unsigned int value);
 *	void add_interrupt_randomness(int irq);
 *	void add_disk_randomness(struct gendisk *disk);
 *	void add_hwgenerator_randomness(const char *buffer, size_t count,
 *					size_t entropy);
 *	void add_bootloader_randomness(const void *buf, unsigned int size);
 *
 * add_device_randomness() is for adding data to the random pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC.  This does *not* add any actual entropy to the
 * pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_input_randomness() uses the input layer interrupt timing, as well as
 * the event type information from the hardware.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the randomness roughly once a second.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * All of these routines try to estimate how many bits of randomness a
 * particular randomness source has contributed.  They do this by keeping
 * track of the first and second order deltas of the event timings.
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
 * add_device_randomness(), depending on whether or not the configuration
 * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
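 *
 * As an illustration (a hypothetical driver, not code from this file), an
 * embedded network driver might feed its MAC address to
 * add_device_randomness() at probe time; this credits no entropy, it only
 * differentiates the pool between otherwise-identical devices:
 *
 *	#include <linux/random.h>
 *	#include <linux/etherdevice.h>
 *
 *	static void example_seed_from_mac(struct net_device *dev)
 *	{
 *		add_device_randomness(dev->dev_addr, ETH_ALEN);
 *	}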
 *
 * Ensuring unpredictability at system startup
 * ============================================
 *
 * When any operating system starts up, it will go through a sequence
 * of actions that are fairly predictable by an adversary, especially
 * if the start-up does not involve interaction with a human operator.
 * This reduces the actual number of bits of unpredictability in the
 * entropy pool below the value in entropy_count.  In order to
 * counteract this effect, it helps to carry information in the
 * entropy pool across shut-downs and start-ups.  To do this, put the
 * following lines in an appropriate script which is run during the boot
 * sequence:
 *
 *	echo "Initializing random number generator..."
 *	random_seed=/var/run/random-seed
 *	# Carry a random seed from start-up to start-up
 *	# Load and then save the whole entropy pool
 *	if [ -f $random_seed ]; then
 *		cat $random_seed >/dev/urandom
 *	else
 *		touch $random_seed
 *	fi
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * and the following lines in an appropriate script which is run as
 * the system is shut down:
 *
 *	# Carry a random seed from shut-down to start-up
 *	# Save the whole entropy pool
 *	echo "Saving random seed..."
 *	random_seed=/var/run/random-seed
 *	touch $random_seed
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * For example, on most modern systems using the System V init
 * scripts, such code fragments would be found in
 * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
 * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
 *
 * Effectively, these commands cause the contents of the entropy pool
 * to be saved at shut-down time and reloaded into the entropy pool at
 * start-up.  (The 'dd' in the addition to the bootup script is to
 * make sure that /etc/random-seed is different for every start-up,
 * even if the system crashes without executing rc.0.)  Even with
 * complete knowledge of the start-up activities, predicting the state
 * of the entropy pool requires knowledge of the previous history of
 * the system.
 *
 * Configuring the /dev/random driver under Linux
 * ==============================================
 *
 * The /dev/random driver under Linux uses minor numbers 8 and 9 of
 * the /dev/mem major number (#1).  So if your system does not have
 * /dev/random and /dev/urandom created already, they can be created
 * by using the commands:
 *
 *	mknod /dev/random c 1 8
 *	mknod /dev/urandom c 1 9
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

/* #define ADD_INTERRUPT_BENCH */

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
};

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

struct crng_state {
	u32 state[16];
	unsigned long init_time;
	spinlock_t lock;
};

static struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
	.state[0] = CHACHA_CONSTANT_EXPA,
	.state[1] = CHACHA_CONSTANT_ND_3,
	.state[2] = CHACHA_CONSTANT_2_BY,
	.state[3] = CHACHA_CONSTANT_TE_K,
};

/*
 * crng_init = 0 --> Uninitialized
 *	       1 --> Initialized
 *	       2 --> Initialized from input_pool
 *
 * crng_init is protected by primary_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
#define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE)
static void extract_crng(u8 out[CHACHA_BLOCK_SIZE]);
static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/**********************************************************************
 *
 * OS independent entropy store.  Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	int entropy_count;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void extract_entropy(void *buf, size_t nbytes);
static void crng_reseed(void);

/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 */
static void _mix_pool_bytes(const void *in, int nbytes)
{
	blake2s_update(&input_pool.hash, in, nbytes);
}

static void __mix_pool_bytes(const void *in, int nbytes)
{
	trace_mix_pool_bytes_nolock(nbytes, _RET_IP_);
	_mix_pool_bytes(in, nbytes);
}

static void mix_pool_bytes(const void *in, int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(nbytes, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(in, nbytes);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}

struct fast_pool {
	u32 pool[4];
	unsigned long last;
	u16 reg_idx;
	u8 count;
};

/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for a 128 bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */
static void fast_mix(struct fast_pool *f)
{
	u32 a = f->pool[0],	b = f->pool[1];
	u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

static void credit_entropy_bits(int nbits)
{
	int entropy_count, orig;

	if (nbits <= 0)
		return;

	nbits = min(nbits, POOL_BITS);

	do {
		orig = READ_ONCE(input_pool.entropy_count);
		entropy_count = min(POOL_BITS, orig + nbits);
	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);

	trace_credit_entropy_bits(nbits, entropy_count, _RET_IP_);

	if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
		crng_reseed();
}

/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/

#define CRNG_RESEED_INTERVAL (300 * HZ)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

static void invalidate_batched_entropy(void);
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

static bool __init crng_init_try_arch_early(void)
{
	int i;
	bool arch_init = true;
	unsigned long rv;

	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		primary_crng.state[i] ^= rv;
	}

	return arch_init;
}

static void __init crng_initialize(void)
{
	extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
	if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
		invalidate_batched_entropy();
		crng_init = 2;
		pr_notice("crng init done (trusting CPU's manufacturer)\n");
	}
	primary_crng.init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

/*
 * crng_fast_load() can be called by code in the interrupt service
 * path.  So we can't afford to dilly-dally.  Returns the number of
 * bytes processed from cp.
 */
static size_t crng_fast_load(const u8 *cp, size_t len)
{
	unsigned long flags;
	u8 *p;
	size_t ret = 0;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (u8 *)&primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--; ret++;
	}
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init == 1)
		pr_notice("fast init done\n");
	return ret;
}

/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes.  (1) We can't trust that the buffer passed to it is
 * unpredictable (so it might not have any entropy at all), and (2) it
 * doesn't have the performance constraints of crng_fast_load().
 *
 * So we do something more comprehensive which is guaranteed to touch
 * all of the primary_crng's state, and which uses an LFSR with a
 * period of 255 as part of the mixing algorithm.  Finally, we do
 * *not* advance crng_init_cnt, since the buffer we get may be something
 * like a fixed DMI table (for example), which might very well be
 * unique to the machine, but is otherwise unvarying.
 */
static int crng_slow_load(const u8 *cp, size_t len)
{
	unsigned long flags;
	static u8 lfsr = 1;
	u8 tmp;
	unsigned int i, max = CHACHA_KEY_SIZE;
	const u8 *src_buf = cp;
	u8 *dest_buf = (u8 *)&primary_crng.state[4];

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % CHACHA_KEY_SIZE];
		dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(void)
{
	unsigned long flags;
	int i, entropy_count;
	union {
		u8 block[CHACHA_BLOCK_SIZE];
		u32 key[8];
	} buf;

	do {
		entropy_count = READ_ONCE(input_pool.entropy_count);
		if (entropy_count < POOL_MIN_BITS)
			return;
	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
	extract_entropy(buf.key, sizeof(buf.key));
	wake_up_interruptible(&random_write_wait);
	kill_fasync(&fasync, SIGIO, POLL_OUT);

	spin_lock_irqsave(&primary_crng.lock, flags);
	for (i = 0; i < 8; i++)
		primary_crng.state[i + 4] ^= buf.key[i];
	memzero_explicit(&buf, sizeof(buf));
	WRITE_ONCE(primary_crng.init_time, jiffies);
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init < 2) {
		invalidate_batched_entropy();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
{
	unsigned long flags, init_time;

	if (crng_ready()) {
		init_time = READ_ONCE(primary_crng.init_time);
		if (time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
			crng_reseed();
	}
	spin_lock_irqsave(&primary_crng.lock, flags);
	chacha20_block(&primary_crng.state[0], out);
	if (primary_crng.state[12] == 0)
		primary_crng.state[13]++;
	spin_unlock_irqrestore(&primary_crng.lock, flags);
}

/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */
static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	unsigned long flags;
	u32 *s, *d;
	int i;

	used = round_up(used, sizeof(u32));
	if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&primary_crng.lock, flags);
	s = (u32 *)&tmp[used];
	d = &primary_crng.state[4];
	for (i = 0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&primary_crng.lock, flags);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
	u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, size);
	_mix_pool_bytes(&time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct {
		long jiffies;
		unsigned int cycles;
		unsigned int num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	mix_pool_bytes(&sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, sample.jiffies);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit entropy estimate to 12 bits.
	 */
	credit_entropy_bits(min_t(int, fls(delta >> 1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(input_pool.entropy_count);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT - 1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	u32 *ptr = (u32 *)regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq)
{
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	u32 c_high, j_high;
	u64 ip;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^=
		(sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
			fast_pool->count = 0;
			fast_pool->last = now;
		}
		return;
	}

	if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
		return;

	if (!spin_trylock(&input_pool.lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
	spin_unlock(&input_pool.lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), input_pool.entropy_count);
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t nbytes)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	trace_extract_entropy(nbytes, input_pool.entropy_count);

	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (nbytes) {
		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		nbytes -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once || crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
				func_name, caller, crng_init);
}

/*
 * This function is the exported kernel interface.  It returns some
It returns some 1001c2557a30STheodore Ts'o * number of good random numbers, suitable for key generation, seeding 100218e9cea7SGreg Price * TCP sequence numbers, etc. It does not rely on the hardware random 100318e9cea7SGreg Price * number generator. For random bytes direct from the hardware RNG 1004e297a783SJason A. Donenfeld * (when available), use get_random_bytes_arch(). In order to ensure 1005e297a783SJason A. Donenfeld * that the randomness provided by this function is okay, the function 1006e297a783SJason A. Donenfeld * wait_for_random_bytes() should be called and return 0 at least once 1007e297a783SJason A. Donenfeld * at any point prior. 10081da177e4SLinus Torvalds */ 1009eecabf56STheodore Ts'o static void _get_random_bytes(void *buf, int nbytes) 10101da177e4SLinus Torvalds { 1011d38bb085SJason A. Donenfeld u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); 1012e192be9dSTheodore Ts'o 10135910895fSTheodore Ts'o trace_get_random_bytes(nbytes, _RET_IP_); 1014e192be9dSTheodore Ts'o 10151ca1b917SEric Biggers while (nbytes >= CHACHA_BLOCK_SIZE) { 1016e192be9dSTheodore Ts'o extract_crng(buf); 10171ca1b917SEric Biggers buf += CHACHA_BLOCK_SIZE; 10181ca1b917SEric Biggers nbytes -= CHACHA_BLOCK_SIZE; 1019e192be9dSTheodore Ts'o } 1020e192be9dSTheodore Ts'o 1021e192be9dSTheodore Ts'o if (nbytes > 0) { 1022e192be9dSTheodore Ts'o extract_crng(tmp); 1023e192be9dSTheodore Ts'o memcpy(buf, tmp, nbytes); 1024c92e040dSTheodore Ts'o crng_backtrack_protect(tmp, nbytes); 1025c92e040dSTheodore Ts'o } else 10261ca1b917SEric Biggers crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE); 1027c92e040dSTheodore Ts'o memzero_explicit(tmp, sizeof(tmp)); 1028c2557a30STheodore Ts'o } 1029eecabf56STheodore Ts'o 1030eecabf56STheodore Ts'o void get_random_bytes(void *buf, int nbytes) 1031eecabf56STheodore Ts'o { 1032eecabf56STheodore Ts'o static void *previous; 1033eecabf56STheodore Ts'o 1034eecabf56STheodore Ts'o warn_unseeded_randomness(&previous); 1035eecabf56STheodore Ts'o _get_random_bytes(buf, nbytes); 1036eecabf56STheodore Ts'o } 1037c2557a30STheodore Ts'o EXPORT_SYMBOL(get_random_bytes); 1038c2557a30STheodore Ts'o 103950ee7529SLinus Torvalds /* 104050ee7529SLinus Torvalds * Each time the timer fires, we expect that we got an unpredictable 104150ee7529SLinus Torvalds * jump in the cycle counter. Even if the timer is running on another 104250ee7529SLinus Torvalds * CPU, the timer activity will be touching the stack of the CPU that is 104350ee7529SLinus Torvalds * generating entropy.. 104450ee7529SLinus Torvalds * 104550ee7529SLinus Torvalds * Note that we don't re-arm the timer in the timer itself - we are 104650ee7529SLinus Torvalds * happy to be scheduled away, since that just makes the load more 104750ee7529SLinus Torvalds * complex, but we do not want the timer to keep ticking unless the 104850ee7529SLinus Torvalds * entropy loop is running. 104950ee7529SLinus Torvalds * 105050ee7529SLinus Torvalds * So the re-arming always happens in the entropy loop itself. 105150ee7529SLinus Torvalds */ 105250ee7529SLinus Torvalds static void entropy_timer(struct timer_list *t) 105350ee7529SLinus Torvalds { 105490ed1e67SJason A. 
Donenfeld credit_entropy_bits(1); 105550ee7529SLinus Torvalds } 105650ee7529SLinus Torvalds 105750ee7529SLinus Torvalds /* 105850ee7529SLinus Torvalds * If we have an actual cycle counter, see if we can 105950ee7529SLinus Torvalds * generate enough entropy with timing noise 106050ee7529SLinus Torvalds */ 106150ee7529SLinus Torvalds static void try_to_generate_entropy(void) 106250ee7529SLinus Torvalds { 106350ee7529SLinus Torvalds struct { 106450ee7529SLinus Torvalds unsigned long now; 106550ee7529SLinus Torvalds struct timer_list timer; 106650ee7529SLinus Torvalds } stack; 106750ee7529SLinus Torvalds 106850ee7529SLinus Torvalds stack.now = random_get_entropy(); 106950ee7529SLinus Torvalds 107050ee7529SLinus Torvalds /* Slow counter - or none. Don't even bother */ 107150ee7529SLinus Torvalds if (stack.now == random_get_entropy()) 107250ee7529SLinus Torvalds return; 107350ee7529SLinus Torvalds 107450ee7529SLinus Torvalds timer_setup_on_stack(&stack.timer, entropy_timer, 0); 107550ee7529SLinus Torvalds while (!crng_ready()) { 107650ee7529SLinus Torvalds if (!timer_pending(&stack.timer)) 107750ee7529SLinus Torvalds mod_timer(&stack.timer, jiffies + 1); 107890ed1e67SJason A. Donenfeld mix_pool_bytes(&stack.now, sizeof(stack.now)); 107950ee7529SLinus Torvalds schedule(); 108050ee7529SLinus Torvalds stack.now = random_get_entropy(); 108150ee7529SLinus Torvalds } 108250ee7529SLinus Torvalds 108350ee7529SLinus Torvalds del_timer_sync(&stack.timer); 108450ee7529SLinus Torvalds destroy_timer_on_stack(&stack.timer); 108590ed1e67SJason A. Donenfeld mix_pool_bytes(&stack.now, sizeof(stack.now)); 108650ee7529SLinus Torvalds } 108750ee7529SLinus Torvalds 1088c2557a30STheodore Ts'o /* 1089e297a783SJason A. Donenfeld * Wait for the urandom pool to be seeded and thus guaranteed to supply 1090e297a783SJason A. Donenfeld * cryptographically secure random numbers. This applies to: the /dev/urandom 1091e297a783SJason A. Donenfeld * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} 1092e297a783SJason A. Donenfeld * family of functions. Using any of these functions without first calling 1093e297a783SJason A. Donenfeld * this function forfeits the guarantee of security. 1094e297a783SJason A. Donenfeld * 1095e297a783SJason A. Donenfeld * Returns: 0 if the urandom pool has been seeded. 1096e297a783SJason A. Donenfeld * -ERESTARTSYS if the function was interrupted by a signal. 1097e297a783SJason A. Donenfeld */ 1098e297a783SJason A. Donenfeld int wait_for_random_bytes(void) 1099e297a783SJason A. Donenfeld { 1100e297a783SJason A. Donenfeld if (likely(crng_ready())) 1101e297a783SJason A. Donenfeld return 0; 110250ee7529SLinus Torvalds 110350ee7529SLinus Torvalds do { 110450ee7529SLinus Torvalds int ret; 110550ee7529SLinus Torvalds ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); 110650ee7529SLinus Torvalds if (ret) 110750ee7529SLinus Torvalds return ret > 0 ? 0 : ret; 110850ee7529SLinus Torvalds 110950ee7529SLinus Torvalds try_to_generate_entropy(); 111050ee7529SLinus Torvalds } while (!crng_ready()); 111150ee7529SLinus Torvalds 111250ee7529SLinus Torvalds return 0; 1113e297a783SJason A. Donenfeld } 1114e297a783SJason A. Donenfeld EXPORT_SYMBOL(wait_for_random_bytes); 1115e297a783SJason A. Donenfeld
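/*
 * Editorial note -- illustrative sketch, not part of this file: the canonical
 * in-kernel pattern pairs wait_for_random_bytes() with get_random_bytes() so
 * that key material is only drawn once the CRNG is seeded. The helper name
 * generate_session_key() and the 32-byte length are invented for the example.
 */
#include <linux/random.h>

static int generate_session_key(u8 key[32])
{
	int ret;

	/* Sleep interruptibly until the urandom pool is seeded. */
	ret = wait_for_random_bytes();
	if (ret)
		return ret;	/* -ERESTARTSYS if a signal arrived first */

	/* Equivalent to reading 32 bytes from /dev/urandom. */
	get_random_bytes(key, 32);
	return 0;
}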
1116e297a783SJason A. Donenfeld /* 11179a47249dSJason A. Donenfeld * Returns whether or not the urandom pool has been seeded and thus guaranteed 11189a47249dSJason A. Donenfeld * to supply cryptographically secure random numbers. This applies to: the 11199a47249dSJason A. Donenfeld * /dev/urandom device, the get_random_bytes function, and the get_random_{u32, 11209a47249dSJason A. Donenfeld * u64,int,long} family of functions. 11219a47249dSJason A. Donenfeld * 11229a47249dSJason A. Donenfeld * Returns: true if the urandom pool has been seeded. 11239a47249dSJason A. Donenfeld * false if the urandom pool has not been seeded. 11249a47249dSJason A. Donenfeld */ 11259a47249dSJason A. Donenfeld bool rng_is_initialized(void) 11269a47249dSJason A. Donenfeld { 11279a47249dSJason A. Donenfeld return crng_ready(); 11289a47249dSJason A. Donenfeld } 11299a47249dSJason A. Donenfeld EXPORT_SYMBOL(rng_is_initialized); 11309a47249dSJason A. Donenfeld 11319a47249dSJason A. Donenfeld /* 1132205a525cSHerbert Xu * Add a callback function that will be invoked when the nonblocking 1133205a525cSHerbert Xu * pool is initialised. 1134205a525cSHerbert Xu * 1135205a525cSHerbert Xu * returns: 0 if callback is successfully added 1136205a525cSHerbert Xu * -EALREADY if pool is already initialised (callback not called) 1137205a525cSHerbert Xu * -ENOENT if module for callback is not alive 1138205a525cSHerbert Xu */ 1139205a525cSHerbert Xu int add_random_ready_callback(struct random_ready_callback *rdy) 1140205a525cSHerbert Xu { 1141205a525cSHerbert Xu struct module *owner; 1142205a525cSHerbert Xu unsigned long flags; 1143205a525cSHerbert Xu int err = -EALREADY; 1144205a525cSHerbert Xu 1145e192be9dSTheodore Ts'o if (crng_ready()) 1146205a525cSHerbert Xu return err; 1147205a525cSHerbert Xu 1148205a525cSHerbert Xu owner = rdy->owner; 1149205a525cSHerbert Xu if (!try_module_get(owner)) 1150205a525cSHerbert Xu return -ENOENT; 1151205a525cSHerbert Xu 1152205a525cSHerbert Xu spin_lock_irqsave(&random_ready_list_lock, flags); 1153e192be9dSTheodore Ts'o if (crng_ready()) 1154205a525cSHerbert Xu goto out; 1155205a525cSHerbert Xu 1156205a525cSHerbert Xu owner = NULL; 1157205a525cSHerbert Xu 1158205a525cSHerbert Xu list_add(&rdy->list, &random_ready_list); 1159205a525cSHerbert Xu err = 0; 1160205a525cSHerbert Xu 1161205a525cSHerbert Xu out: 1162205a525cSHerbert Xu spin_unlock_irqrestore(&random_ready_list_lock, flags); 1163205a525cSHerbert Xu 1164205a525cSHerbert Xu module_put(owner); 1165205a525cSHerbert Xu 1166205a525cSHerbert Xu return err; 1167205a525cSHerbert Xu } 1168205a525cSHerbert Xu EXPORT_SYMBOL(add_random_ready_callback); 1169205a525cSHerbert Xu 1170205a525cSHerbert Xu /* 1171205a525cSHerbert Xu * Delete a previously registered readiness callback function. 1172205a525cSHerbert Xu */ 1173205a525cSHerbert Xu void del_random_ready_callback(struct random_ready_callback *rdy) 1174205a525cSHerbert Xu { 1175205a525cSHerbert Xu unsigned long flags; 1176205a525cSHerbert Xu struct module *owner = NULL; 1177205a525cSHerbert Xu 1178205a525cSHerbert Xu spin_lock_irqsave(&random_ready_list_lock, flags); 1179205a525cSHerbert Xu if (!list_empty(&rdy->list)) { 1180205a525cSHerbert Xu list_del_init(&rdy->list); 1181205a525cSHerbert Xu owner = rdy->owner; 1182205a525cSHerbert Xu } 1183205a525cSHerbert Xu spin_unlock_irqrestore(&random_ready_list_lock, flags); 1184205a525cSHerbert Xu 1185205a525cSHerbert Xu module_put(owner); 1186205a525cSHerbert Xu } 1187205a525cSHerbert Xu EXPORT_SYMBOL(del_random_ready_callback); 1188205a525cSHerbert Xu
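/*
 * Editorial note -- illustrative sketch, not part of this file: a module that
 * must not block can register for notification instead, the way crypto/drbg.c
 * uses this API. The callback body and my_driver_reseed() are hypothetical.
 */
#include <linux/module.h>
#include <linux/random.h>

static void my_driver_rng_ready(struct random_ready_callback *rdy)
{
	my_driver_reseed();	/* hypothetical: pull fresh bytes now that the crng is ready */
}

static struct random_ready_callback my_driver_ready = {
	.func	= my_driver_rng_ready,
	.owner	= THIS_MODULE,
};

static int my_driver_setup_rng(void)
{
	int err = add_random_ready_callback(&my_driver_ready);

	if (err == -EALREADY)
		return 0;	/* already seeded; safe to use the RNG immediately */
	return err;		/* 0 on success; the callback will fire once seeded */
}

/* If registration returned 0, call del_random_ready_callback(&my_driver_ready)
 * from the module's exit path so a callback that never fired is unhooked. */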
1189205a525cSHerbert Xu /* 1190c2557a30STheodore Ts'o * This function will use the architecture-specific hardware random 1191c2557a30STheodore Ts'o * number generator if it is available. The arch-specific hw RNG will 1192c2557a30STheodore Ts'o * almost certainly be faster than what we can do in software, but it 1193c2557a30STheodore Ts'o * is impossible to verify that it is implemented securely (as 1194c2557a30STheodore Ts'o * opposed to, say, the AES encryption of a sequence number using a 1195c2557a30STheodore Ts'o * key known by the NSA). So it's useful if we need the speed, but 1196c2557a30STheodore Ts'o * only if we're willing to trust the hardware manufacturer not to 1197c2557a30STheodore Ts'o * have put in a back door. 1198753d433bSTobin C. Harding * 1199753d433bSTobin C. Harding * Return number of bytes filled in. 1200c2557a30STheodore Ts'o */ 1201753d433bSTobin C. Harding int __must_check get_random_bytes_arch(void *buf, int nbytes) 1202c2557a30STheodore Ts'o { 1203753d433bSTobin C. Harding int left = nbytes; 1204d38bb085SJason A. Donenfeld u8 *p = buf; 120563d77173SH. Peter Anvin 1206753d433bSTobin C. Harding trace_get_random_bytes_arch(left, _RET_IP_); 1207753d433bSTobin C. Harding while (left) { 120863d77173SH. Peter Anvin unsigned long v; 1209753d433bSTobin C. Harding int chunk = min_t(int, left, sizeof(unsigned long)); 121063d77173SH. Peter Anvin 121163d77173SH. Peter Anvin if (!arch_get_random_long(&v)) 121263d77173SH. Peter Anvin break; 121363d77173SH. Peter Anvin 1214bd29e568SLuck, Tony memcpy(p, &v, chunk); 121563d77173SH. Peter Anvin p += chunk; 1216753d433bSTobin C. Harding left -= chunk; 121763d77173SH. Peter Anvin } 121863d77173SH. Peter Anvin 1219753d433bSTobin C. Harding return nbytes - left; 12201da177e4SLinus Torvalds } 1221c2557a30STheodore Ts'o EXPORT_SYMBOL(get_random_bytes_arch); 12221da177e4SLinus Torvalds 12231da177e4SLinus Torvalds /* 12241da177e4SLinus Torvalds * init_std_data - initialize pool with system data 12251da177e4SLinus Torvalds * 12261da177e4SLinus Torvalds * This function clears the pool's entropy count and mixes some system 12271da177e4SLinus Torvalds * data into the pool to prepare it for use. The pool is not cleared 12281da177e4SLinus Torvalds * as that can only decrease the entropy in the pool. 12291da177e4SLinus Torvalds */ 123090ed1e67SJason A. Donenfeld static void __init init_std_data(void) 12311da177e4SLinus Torvalds { 12323e88bdffSTheodore Ts'o int i; 1233902c098aSTheodore Ts'o ktime_t now = ktime_get_real(); 1234902c098aSTheodore Ts'o unsigned long rv; 12351da177e4SLinus Torvalds 123690ed1e67SJason A. Donenfeld mix_pool_bytes(&now, sizeof(now)); 12376e8ec255SJason A. Donenfeld for (i = BLAKE2S_BLOCK_SIZE; i > 0; i -= sizeof(rv)) { 123883664a69SH. Peter Anvin if (!arch_get_random_seed_long(&rv) && 123983664a69SH. Peter Anvin !arch_get_random_long(&rv)) 1240ae9ecd92STheodore Ts'o rv = random_get_entropy(); 124190ed1e67SJason A. Donenfeld mix_pool_bytes(&rv, sizeof(rv)); 12423e88bdffSTheodore Ts'o } 124390ed1e67SJason A. Donenfeld mix_pool_bytes(utsname(), sizeof(*(utsname()))); 12441da177e4SLinus Torvalds } 12451da177e4SLinus Torvalds 1246cbc96b75STony Luck /* 1247cbc96b75STony Luck * Note that setup_arch() may call add_device_randomness() 1248cbc96b75STony Luck * long before we get here. This allows seeding of the pools 1249cbc96b75STony Luck * with some platform dependent data very early in the boot 1250cbc96b75STony Luck * process. But it limits our options here. We must use 1251cbc96b75STony Luck * statically allocated structures that already have all 1252cbc96b75STony Luck * initializations complete at compile time.
We should also 1253cbc96b75STony Luck * take care not to overwrite the precious per platform data 1254cbc96b75STony Luck * we were given. 1255cbc96b75STony Luck */ 1256d5553523SKees Cook int __init rand_initialize(void) 12571da177e4SLinus Torvalds { 125890ed1e67SJason A. Donenfeld init_std_data(); 1259*a9412d51SJason A. Donenfeld crng_initialize(); 12604e00b339STheodore Ts'o if (ratelimit_disable) { 12614e00b339STheodore Ts'o urandom_warning.interval = 0; 12624e00b339STheodore Ts'o unseeded_warning.interval = 0; 12634e00b339STheodore Ts'o } 12641da177e4SLinus Torvalds return 0; 12651da177e4SLinus Torvalds } 12661da177e4SLinus Torvalds 12679361401eSDavid Howells #ifdef CONFIG_BLOCK 12681da177e4SLinus Torvalds void rand_initialize_disk(struct gendisk *disk) 12691da177e4SLinus Torvalds { 12701da177e4SLinus Torvalds struct timer_rand_state *state; 12711da177e4SLinus Torvalds 12721da177e4SLinus Torvalds /* 1273f8595815SEric Dumazet * If kzalloc returns null, we just won't use that entropy 12741da177e4SLinus Torvalds * source. 12751da177e4SLinus Torvalds */ 1276f8595815SEric Dumazet state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); 1277644008dfSTheodore Ts'o if (state) { 1278644008dfSTheodore Ts'o state->last_time = INITIAL_JIFFIES; 12791da177e4SLinus Torvalds disk->random = state; 12801da177e4SLinus Torvalds } 1281644008dfSTheodore Ts'o } 12829361401eSDavid Howells #endif 12831da177e4SLinus Torvalds 1284248045b8SJason A. Donenfeld static ssize_t urandom_read_nowarn(struct file *file, char __user *buf, 1285248045b8SJason A. Donenfeld size_t nbytes, loff_t *ppos) 1286c6f1deb1SAndy Lutomirski { 1287c6f1deb1SAndy Lutomirski int ret; 1288c6f1deb1SAndy Lutomirski 1289c5704490SJason A. Donenfeld nbytes = min_t(size_t, nbytes, INT_MAX >> 6); 1290c6f1deb1SAndy Lutomirski ret = extract_crng_user(buf, nbytes); 1291c5704490SJason A. Donenfeld trace_urandom_read(8 * nbytes, 0, input_pool.entropy_count); 1292c6f1deb1SAndy Lutomirski return ret; 1293c6f1deb1SAndy Lutomirski } 1294c6f1deb1SAndy Lutomirski 1295248045b8SJason A. Donenfeld static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, 1296248045b8SJason A. Donenfeld loff_t *ppos) 12971da177e4SLinus Torvalds { 12989b4d0087STheodore Ts'o static int maxwarn = 10; 1299301f0595STheodore Ts'o 1300e192be9dSTheodore Ts'o if (!crng_ready() && maxwarn > 0) { 13019b4d0087STheodore Ts'o maxwarn--; 13024e00b339STheodore Ts'o if (__ratelimit(&urandom_warning)) 130312cd53afSYangtao Li pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", 1304e192be9dSTheodore Ts'o current->comm, nbytes); 13059b4d0087STheodore Ts'o } 1306c6f1deb1SAndy Lutomirski 1307c6f1deb1SAndy Lutomirski return urandom_read_nowarn(file, buf, nbytes, ppos); 13081da177e4SLinus Torvalds } 13091da177e4SLinus Torvalds 1310248045b8SJason A. Donenfeld static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, 1311248045b8SJason A. Donenfeld loff_t *ppos) 131230c08efeSAndy Lutomirski { 131330c08efeSAndy Lutomirski int ret; 131430c08efeSAndy Lutomirski 131530c08efeSAndy Lutomirski ret = wait_for_random_bytes(); 131630c08efeSAndy Lutomirski if (ret != 0) 131730c08efeSAndy Lutomirski return ret; 131830c08efeSAndy Lutomirski return urandom_read_nowarn(file, buf, nbytes, ppos); 131930c08efeSAndy Lutomirski } 132030c08efeSAndy Lutomirski 1321248045b8SJason A. 
Donenfeld static __poll_t random_poll(struct file *file, poll_table *wait) 132289b310a2SChristoph Hellwig { 1323a11e1d43SLinus Torvalds __poll_t mask; 132489b310a2SChristoph Hellwig 132530c08efeSAndy Lutomirski poll_wait(file, &crng_init_wait, wait); 1326a11e1d43SLinus Torvalds poll_wait(file, &random_write_wait, wait); 1327a11e1d43SLinus Torvalds mask = 0; 132830c08efeSAndy Lutomirski if (crng_ready()) 1329a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM; 1330489c7fc4SJason A. Donenfeld if (input_pool.entropy_count < POOL_MIN_BITS) 1331a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM; 13321da177e4SLinus Torvalds return mask; 13331da177e4SLinus Torvalds } 13341da177e4SLinus Torvalds 1335248045b8SJason A. Donenfeld static int write_pool(const char __user *buffer, size_t count) 13367f397dcdSMatt Mackall { 13377f397dcdSMatt Mackall size_t bytes; 1338d38bb085SJason A. Donenfeld u32 t, buf[16]; 13397f397dcdSMatt Mackall const char __user *p = buffer; 13407f397dcdSMatt Mackall 13417f397dcdSMatt Mackall while (count > 0) { 134281e69df3STheodore Ts'o int b, i = 0; 134381e69df3STheodore Ts'o 13447f397dcdSMatt Mackall bytes = min(count, sizeof(buf)); 13457f397dcdSMatt Mackall if (copy_from_user(&buf, p, bytes)) 13467f397dcdSMatt Mackall return -EFAULT; 13477f397dcdSMatt Mackall 1348d38bb085SJason A. Donenfeld for (b = bytes; b > 0; b -= sizeof(u32), i++) { 134981e69df3STheodore Ts'o if (!arch_get_random_int(&t)) 135081e69df3STheodore Ts'o break; 135181e69df3STheodore Ts'o buf[i] ^= t; 135281e69df3STheodore Ts'o } 135381e69df3STheodore Ts'o 13547f397dcdSMatt Mackall count -= bytes; 13557f397dcdSMatt Mackall p += bytes; 13567f397dcdSMatt Mackall 135790ed1e67SJason A. Donenfeld mix_pool_bytes(buf, bytes); 135891f3f1e3SMatt Mackall cond_resched(); 13597f397dcdSMatt Mackall } 13607f397dcdSMatt Mackall 13617f397dcdSMatt Mackall return 0; 13627f397dcdSMatt Mackall } 13637f397dcdSMatt Mackall 136490b75ee5SMatt Mackall static ssize_t random_write(struct file *file, const char __user *buffer, 13651da177e4SLinus Torvalds size_t count, loff_t *ppos) 13661da177e4SLinus Torvalds { 13677f397dcdSMatt Mackall size_t ret; 13687f397dcdSMatt Mackall 136990ed1e67SJason A. Donenfeld ret = write_pool(buffer, count); 13707f397dcdSMatt Mackall if (ret) 13717f397dcdSMatt Mackall return ret; 13727f397dcdSMatt Mackall 13737f397dcdSMatt Mackall return (ssize_t)count; 13741da177e4SLinus Torvalds } 13751da177e4SLinus Torvalds 137643ae4860SMatt Mackall static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) 13771da177e4SLinus Torvalds { 13781da177e4SLinus Torvalds int size, ent_count; 13791da177e4SLinus Torvalds int __user *p = (int __user *)arg; 13801da177e4SLinus Torvalds int retval; 13811da177e4SLinus Torvalds 13821da177e4SLinus Torvalds switch (cmd) { 13831da177e4SLinus Torvalds case RNDGETENTCNT: 138443ae4860SMatt Mackall /* inherently racy, no point locking */ 1385c5704490SJason A. Donenfeld if (put_user(input_pool.entropy_count, p)) 13861da177e4SLinus Torvalds return -EFAULT; 13871da177e4SLinus Torvalds return 0; 13881da177e4SLinus Torvalds case RNDADDTOENTCNT: 13891da177e4SLinus Torvalds if (!capable(CAP_SYS_ADMIN)) 13901da177e4SLinus Torvalds return -EPERM; 13911da177e4SLinus Torvalds if (get_user(ent_count, p)) 13921da177e4SLinus Torvalds return -EFAULT; 1393a49c010eSJason A. Donenfeld if (ent_count < 0) 1394a49c010eSJason A. Donenfeld return -EINVAL; 1395a49c010eSJason A. Donenfeld credit_entropy_bits(ent_count); 1396a49c010eSJason A. 
Donenfeld return 0; 13971da177e4SLinus Torvalds case RNDADDENTROPY: 13981da177e4SLinus Torvalds if (!capable(CAP_SYS_ADMIN)) 13991da177e4SLinus Torvalds return -EPERM; 14001da177e4SLinus Torvalds if (get_user(ent_count, p++)) 14011da177e4SLinus Torvalds return -EFAULT; 14021da177e4SLinus Torvalds if (ent_count < 0) 14031da177e4SLinus Torvalds return -EINVAL; 14041da177e4SLinus Torvalds if (get_user(size, p++)) 14051da177e4SLinus Torvalds return -EFAULT; 140690ed1e67SJason A. Donenfeld retval = write_pool((const char __user *)p, size); 14071da177e4SLinus Torvalds if (retval < 0) 14081da177e4SLinus Torvalds return retval; 1409a49c010eSJason A. Donenfeld credit_entropy_bits(ent_count); 1410a49c010eSJason A. Donenfeld return 0; 14111da177e4SLinus Torvalds case RNDZAPENTCNT: 14121da177e4SLinus Torvalds case RNDCLEARPOOL: 1413ae9ecd92STheodore Ts'o /* 1414ae9ecd92STheodore Ts'o * Clear the entropy pool counters. We no longer clear 1415ae9ecd92STheodore Ts'o * the entropy pool, as that's silly. 1416ae9ecd92STheodore Ts'o */ 14171da177e4SLinus Torvalds if (!capable(CAP_SYS_ADMIN)) 14181da177e4SLinus Torvalds return -EPERM; 1419489c7fc4SJason A. Donenfeld if (xchg(&input_pool.entropy_count, 0)) { 1420042e293eSJason A. Donenfeld wake_up_interruptible(&random_write_wait); 1421042e293eSJason A. Donenfeld kill_fasync(&fasync, SIGIO, POLL_OUT); 1422042e293eSJason A. Donenfeld } 14231da177e4SLinus Torvalds return 0; 1424d848e5f8STheodore Ts'o case RNDRESEEDCRNG: 1425d848e5f8STheodore Ts'o if (!capable(CAP_SYS_ADMIN)) 1426d848e5f8STheodore Ts'o return -EPERM; 1427d848e5f8STheodore Ts'o if (crng_init < 2) 1428d848e5f8STheodore Ts'o return -ENODATA; 1429*a9412d51SJason A. Donenfeld crng_reseed(); 1430d848e5f8STheodore Ts'o return 0; 14311da177e4SLinus Torvalds default: 14321da177e4SLinus Torvalds return -EINVAL; 14331da177e4SLinus Torvalds } 14341da177e4SLinus Torvalds } 14351da177e4SLinus Torvalds 14369a6f70bbSJeff Dike static int random_fasync(int fd, struct file *filp, int on) 14379a6f70bbSJeff Dike { 14389a6f70bbSJeff Dike return fasync_helper(fd, filp, on, &fasync); 14399a6f70bbSJeff Dike } 14409a6f70bbSJeff Dike 14412b8693c0SArjan van de Ven const struct file_operations random_fops = { 14421da177e4SLinus Torvalds .read = random_read, 14431da177e4SLinus Torvalds .write = random_write, 1444a11e1d43SLinus Torvalds .poll = random_poll, 144543ae4860SMatt Mackall .unlocked_ioctl = random_ioctl, 1446507e4e2bSArnd Bergmann .compat_ioctl = compat_ptr_ioctl, 14479a6f70bbSJeff Dike .fasync = random_fasync, 14486038f373SArnd Bergmann .llseek = noop_llseek, 14491da177e4SLinus Torvalds }; 14501da177e4SLinus Torvalds 14512b8693c0SArjan van de Ven const struct file_operations urandom_fops = { 14521da177e4SLinus Torvalds .read = urandom_read, 14531da177e4SLinus Torvalds .write = random_write, 145443ae4860SMatt Mackall .unlocked_ioctl = random_ioctl, 14554aa37c46SJason A. Donenfeld .compat_ioctl = compat_ptr_ioctl, 14569a6f70bbSJeff Dike .fasync = random_fasync, 14576038f373SArnd Bergmann .llseek = noop_llseek, 14581da177e4SLinus Torvalds }; 14591da177e4SLinus Torvalds 1460248045b8SJason A. Donenfeld SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, 1461248045b8SJason A. Donenfeld flags) 1462c6e9d6f3STheodore Ts'o { 1463e297a783SJason A. Donenfeld int ret; 1464e297a783SJason A. 
Donenfeld 146575551dbfSAndy Lutomirski if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) 146675551dbfSAndy Lutomirski return -EINVAL; 146775551dbfSAndy Lutomirski 146875551dbfSAndy Lutomirski /* 146975551dbfSAndy Lutomirski * Requesting insecure and blocking randomness at the same time makes 147075551dbfSAndy Lutomirski * no sense. 147175551dbfSAndy Lutomirski */ 147275551dbfSAndy Lutomirski if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) 1473c6e9d6f3STheodore Ts'o return -EINVAL; 1474c6e9d6f3STheodore Ts'o 1475c6e9d6f3STheodore Ts'o if (count > INT_MAX) 1476c6e9d6f3STheodore Ts'o count = INT_MAX; 1477c6e9d6f3STheodore Ts'o 147875551dbfSAndy Lutomirski if (!(flags & GRND_INSECURE) && !crng_ready()) { 1479c6e9d6f3STheodore Ts'o if (flags & GRND_NONBLOCK) 1480c6e9d6f3STheodore Ts'o return -EAGAIN; 1481e297a783SJason A. Donenfeld ret = wait_for_random_bytes(); 1482e297a783SJason A. Donenfeld if (unlikely(ret)) 1483e297a783SJason A. Donenfeld return ret; 1484c6e9d6f3STheodore Ts'o } 1485c6f1deb1SAndy Lutomirski return urandom_read_nowarn(NULL, buf, count, NULL); 1486c6e9d6f3STheodore Ts'o } 1487c6e9d6f3STheodore Ts'o 14881da177e4SLinus Torvalds /******************************************************************** 14891da177e4SLinus Torvalds * 14901da177e4SLinus Torvalds * Sysctl interface 14911da177e4SLinus Torvalds * 14921da177e4SLinus Torvalds ********************************************************************/ 14931da177e4SLinus Torvalds 14941da177e4SLinus Torvalds #ifdef CONFIG_SYSCTL 14951da177e4SLinus Torvalds 14961da177e4SLinus Torvalds #include <linux/sysctl.h> 14971da177e4SLinus Torvalds 1498db61ffe3SFabio Estevam static int random_min_urandom_seed = 60; 1499489c7fc4SJason A. Donenfeld static int random_write_wakeup_bits = POOL_MIN_BITS; 1500489c7fc4SJason A. Donenfeld static int sysctl_poolsize = POOL_BITS; 15011da177e4SLinus Torvalds static char sysctl_bootid[16]; 15021da177e4SLinus Torvalds 15031da177e4SLinus Torvalds /* 1504f22052b2SGreg Price * This function is used to return both the bootid UUID, and random 15051da177e4SLinus Torvalds * UUID. The difference is in whether table->data is NULL; if it is, 15061da177e4SLinus Torvalds * then a new UUID is generated and returned to the user. 15071da177e4SLinus Torvalds * 1508f22052b2SGreg Price * If the user accesses this via the proc interface, the UUID will be 1509f22052b2SGreg Price * returned as an ASCII string in the standard UUID format; if via the 1510f22052b2SGreg Price * sysctl system call, as 16 bytes of binary data. 15111da177e4SLinus Torvalds */ 1512248045b8SJason A. Donenfeld static int proc_do_uuid(struct ctl_table *table, int write, void *buffer, 1513248045b8SJason A. 
Donenfeld size_t *lenp, loff_t *ppos) 15141da177e4SLinus Torvalds { 1515a151427eSJoe Perches struct ctl_table fake_table; 15161da177e4SLinus Torvalds unsigned char buf[64], tmp_uuid[16], *uuid; 15171da177e4SLinus Torvalds 15181da177e4SLinus Torvalds uuid = table->data; 15191da177e4SLinus Torvalds if (!uuid) { 15201da177e4SLinus Torvalds uuid = tmp_uuid; 15211da177e4SLinus Torvalds generate_random_uuid(uuid); 152244e4360fSMathieu Desnoyers } else { 152344e4360fSMathieu Desnoyers static DEFINE_SPINLOCK(bootid_spinlock); 152444e4360fSMathieu Desnoyers 152544e4360fSMathieu Desnoyers spin_lock(&bootid_spinlock); 152644e4360fSMathieu Desnoyers if (!uuid[8]) 152744e4360fSMathieu Desnoyers generate_random_uuid(uuid); 152844e4360fSMathieu Desnoyers spin_unlock(&bootid_spinlock); 152944e4360fSMathieu Desnoyers } 15301da177e4SLinus Torvalds 153135900771SJoe Perches sprintf(buf, "%pU", uuid); 153235900771SJoe Perches 15331da177e4SLinus Torvalds fake_table.data = buf; 15341da177e4SLinus Torvalds fake_table.maxlen = sizeof(buf); 15351da177e4SLinus Torvalds 15368d65af78SAlexey Dobriyan return proc_dostring(&fake_table, write, buffer, lenp, ppos); 15371da177e4SLinus Torvalds } 15381da177e4SLinus Torvalds 15395475e8f0SXiaoming Ni static struct ctl_table random_table[] = { 15401da177e4SLinus Torvalds { 15411da177e4SLinus Torvalds .procname = "poolsize", 15421da177e4SLinus Torvalds .data = &sysctl_poolsize, 15431da177e4SLinus Torvalds .maxlen = sizeof(int), 15441da177e4SLinus Torvalds .mode = 0444, 15456d456111SEric W. Biederman .proc_handler = proc_dointvec, 15461da177e4SLinus Torvalds }, 15471da177e4SLinus Torvalds { 15481da177e4SLinus Torvalds .procname = "entropy_avail", 1549c5704490SJason A. Donenfeld .data = &input_pool.entropy_count, 15501da177e4SLinus Torvalds .maxlen = sizeof(int), 15511da177e4SLinus Torvalds .mode = 0444, 1552c5704490SJason A. Donenfeld .proc_handler = proc_dointvec, 15531da177e4SLinus Torvalds }, 15541da177e4SLinus Torvalds { 15551da177e4SLinus Torvalds .procname = "write_wakeup_threshold", 15562132a96fSGreg Price .data = &random_write_wakeup_bits, 15571da177e4SLinus Torvalds .maxlen = sizeof(int), 15581da177e4SLinus Torvalds .mode = 0644, 1559489c7fc4SJason A. Donenfeld .proc_handler = proc_dointvec, 15601da177e4SLinus Torvalds }, 15611da177e4SLinus Torvalds { 1562f5c2742cSTheodore Ts'o .procname = "urandom_min_reseed_secs", 1563f5c2742cSTheodore Ts'o .data = &random_min_urandom_seed, 1564f5c2742cSTheodore Ts'o .maxlen = sizeof(int), 1565f5c2742cSTheodore Ts'o .mode = 0644, 1566f5c2742cSTheodore Ts'o .proc_handler = proc_dointvec, 1567f5c2742cSTheodore Ts'o }, 1568f5c2742cSTheodore Ts'o { 15691da177e4SLinus Torvalds .procname = "boot_id", 15701da177e4SLinus Torvalds .data = &sysctl_bootid, 15711da177e4SLinus Torvalds .maxlen = 16, 15721da177e4SLinus Torvalds .mode = 0444, 15736d456111SEric W. Biederman .proc_handler = proc_do_uuid, 15741da177e4SLinus Torvalds }, 15751da177e4SLinus Torvalds { 15761da177e4SLinus Torvalds .procname = "uuid", 15771da177e4SLinus Torvalds .maxlen = 16, 15781da177e4SLinus Torvalds .mode = 0444, 15796d456111SEric W. 
Biederman .proc_handler = proc_do_uuid, 15801da177e4SLinus Torvalds }, 158143759d4fSTheodore Ts'o #ifdef ADD_INTERRUPT_BENCH 158243759d4fSTheodore Ts'o { 158343759d4fSTheodore Ts'o .procname = "add_interrupt_avg_cycles", 158443759d4fSTheodore Ts'o .data = &avg_cycles, 158543759d4fSTheodore Ts'o .maxlen = sizeof(avg_cycles), 158643759d4fSTheodore Ts'o .mode = 0444, 158743759d4fSTheodore Ts'o .proc_handler = proc_doulongvec_minmax, 158843759d4fSTheodore Ts'o }, 158943759d4fSTheodore Ts'o { 159043759d4fSTheodore Ts'o .procname = "add_interrupt_avg_deviation", 159143759d4fSTheodore Ts'o .data = &avg_deviation, 159243759d4fSTheodore Ts'o .maxlen = sizeof(avg_deviation), 159343759d4fSTheodore Ts'o .mode = 0444, 159443759d4fSTheodore Ts'o .proc_handler = proc_doulongvec_minmax, 159543759d4fSTheodore Ts'o }, 159643759d4fSTheodore Ts'o #endif 1597894d2491SEric W. Biederman { } 15981da177e4SLinus Torvalds }; 15995475e8f0SXiaoming Ni 16005475e8f0SXiaoming Ni /* 16015475e8f0SXiaoming Ni * rand_initialize() is called before sysctl_init(), 16025475e8f0SXiaoming Ni * so we cannot call register_sysctl_init() in rand_initialize() 16035475e8f0SXiaoming Ni */ 16045475e8f0SXiaoming Ni static int __init random_sysctls_init(void) 16055475e8f0SXiaoming Ni { 16065475e8f0SXiaoming Ni register_sysctl_init("kernel/random", random_table); 16075475e8f0SXiaoming Ni return 0; 16085475e8f0SXiaoming Ni } 16095475e8f0SXiaoming Ni device_initcall(random_sysctls_init); 16101da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 16111da177e4SLinus Torvalds 161277760fd7SJason A. Donenfeld static atomic_t batch_generation = ATOMIC_INIT(0); 161377760fd7SJason A. Donenfeld 1614f5b98461SJason A. Donenfeld struct batched_entropy { 1615f5b98461SJason A. Donenfeld union { 16161ca1b917SEric Biggers u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)]; 16171ca1b917SEric Biggers u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)]; 1618f5b98461SJason A. Donenfeld }; 161977760fd7SJason A. Donenfeld local_lock_t lock; 1620f5b98461SJason A. Donenfeld unsigned int position; 162177760fd7SJason A. Donenfeld int generation; 1622f5b98461SJason A. Donenfeld }; 1623b1132deaSEric Biggers 16241da177e4SLinus Torvalds /* 1625f5b98461SJason A. Donenfeld * Get a random word for internal kernel use only. The quality of the random 162669efea71SJason A. Donenfeld * number is good as /dev/urandom, but there is no backtrack protection, with 162769efea71SJason A. Donenfeld * the goal of being quite fast and not depleting entropy. In order to ensure 1628e297a783SJason A. Donenfeld * that the randomness provided by this function is okay, the function 162969efea71SJason A. Donenfeld * wait_for_random_bytes() should be called and return 0 at least once at any 163069efea71SJason A. Donenfeld * point prior. 16311da177e4SLinus Torvalds */ 1632b7d5dc21SSebastian Andrzej Siewior static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { 163377760fd7SJason A. Donenfeld .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock) 1634b7d5dc21SSebastian Andrzej Siewior }; 1635b7d5dc21SSebastian Andrzej Siewior 1636c440408cSJason A. Donenfeld u64 get_random_u64(void) 1637ec9ee4acSDaniel Cashman { 1638c440408cSJason A. Donenfeld u64 ret; 1639b7d5dc21SSebastian Andrzej Siewior unsigned long flags; 1640f5b98461SJason A. Donenfeld struct batched_entropy *batch; 1641eecabf56STheodore Ts'o static void *previous; 164277760fd7SJason A. Donenfeld int next_gen; 1643ec9ee4acSDaniel Cashman 1644eecabf56STheodore Ts'o warn_unseeded_randomness(&previous); 1645d06bfd19SJason A. 
Donenfeld 164677760fd7SJason A. Donenfeld local_lock_irqsave(&batched_entropy_u64.lock, flags); 1647b7d5dc21SSebastian Andrzej Siewior batch = raw_cpu_ptr(&batched_entropy_u64); 164877760fd7SJason A. Donenfeld 164977760fd7SJason A. Donenfeld next_gen = atomic_read(&batch_generation); 165077760fd7SJason A. Donenfeld if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0 || 165177760fd7SJason A. Donenfeld next_gen != batch->generation) { 1652a5e9f557SEric Biggers extract_crng((u8 *)batch->entropy_u64); 1653f5b98461SJason A. Donenfeld batch->position = 0; 165477760fd7SJason A. Donenfeld batch->generation = next_gen; 1655f5b98461SJason A. Donenfeld } 165677760fd7SJason A. Donenfeld 1657c440408cSJason A. Donenfeld ret = batch->entropy_u64[batch->position++]; 165877760fd7SJason A. Donenfeld local_unlock_irqrestore(&batched_entropy_u64.lock, flags); 1659ec9ee4acSDaniel Cashman return ret; 1660ec9ee4acSDaniel Cashman } 1661c440408cSJason A. Donenfeld EXPORT_SYMBOL(get_random_u64); 1662ec9ee4acSDaniel Cashman 1663b7d5dc21SSebastian Andrzej Siewior static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { 166477760fd7SJason A. Donenfeld .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock) 1665b7d5dc21SSebastian Andrzej Siewior }; 166677760fd7SJason A. Donenfeld 1667c440408cSJason A. Donenfeld u32 get_random_u32(void) 1668f5b98461SJason A. Donenfeld { 1669c440408cSJason A. Donenfeld u32 ret; 1670b7d5dc21SSebastian Andrzej Siewior unsigned long flags; 1671f5b98461SJason A. Donenfeld struct batched_entropy *batch; 1672eecabf56STheodore Ts'o static void *previous; 167377760fd7SJason A. Donenfeld int next_gen; 1674f5b98461SJason A. Donenfeld 1675eecabf56STheodore Ts'o warn_unseeded_randomness(&previous); 1676d06bfd19SJason A. Donenfeld 167777760fd7SJason A. Donenfeld local_lock_irqsave(&batched_entropy_u32.lock, flags); 1678b7d5dc21SSebastian Andrzej Siewior batch = raw_cpu_ptr(&batched_entropy_u32); 167977760fd7SJason A. Donenfeld 168077760fd7SJason A. Donenfeld next_gen = atomic_read(&batch_generation); 168177760fd7SJason A. Donenfeld if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0 || 168277760fd7SJason A. Donenfeld next_gen != batch->generation) { 1683a5e9f557SEric Biggers extract_crng((u8 *)batch->entropy_u32); 1684f5b98461SJason A. Donenfeld batch->position = 0; 168577760fd7SJason A. Donenfeld batch->generation = next_gen; 1686f5b98461SJason A. Donenfeld } 168777760fd7SJason A. Donenfeld 1688c440408cSJason A. Donenfeld ret = batch->entropy_u32[batch->position++]; 168977760fd7SJason A. Donenfeld local_unlock_irqrestore(&batched_entropy_u32.lock, flags); 1690f5b98461SJason A. Donenfeld return ret; 1691f5b98461SJason A. Donenfeld } 1692c440408cSJason A. Donenfeld EXPORT_SYMBOL(get_random_u32); 1693f5b98461SJason A. Donenfeld 1694b169c13dSJason A. Donenfeld /* It's important to invalidate all potential batched entropy that might 1695b169c13dSJason A. Donenfeld * be stored before the crng is initialized, which we can do lazily by 169677760fd7SJason A. Donenfeld * bumping the generation counter. 169777760fd7SJason A. Donenfeld */ 1698b169c13dSJason A. Donenfeld static void invalidate_batched_entropy(void) 1699b169c13dSJason A. Donenfeld { 170077760fd7SJason A. Donenfeld atomic_inc(&batch_generation); 1701b169c13dSJason A. Donenfeld } 1702b169c13dSJason A. Donenfeld 170399fdafdeSJason Cooper /** 170499fdafdeSJason Cooper * randomize_page - Generate a random, page aligned address 170599fdafdeSJason Cooper * @start: The smallest acceptable address the caller will take. 
170699fdafdeSJason Cooper * @range: The size of the area, starting at @start, within which the 170799fdafdeSJason Cooper * random address must fall. 170899fdafdeSJason Cooper * 170999fdafdeSJason Cooper * If @start + @range would overflow, @range is capped. 171099fdafdeSJason Cooper * 171199fdafdeSJason Cooper * NOTE: Historical use of randomize_range, which this replaces, presumed that 171299fdafdeSJason Cooper * @start was already page aligned. We now align it regardless. 171399fdafdeSJason Cooper * 171499fdafdeSJason Cooper * Return: A page aligned address within [start, start + range). On error, 171599fdafdeSJason Cooper * @start is returned. 171699fdafdeSJason Cooper */ 1717248045b8SJason A. Donenfeld unsigned long randomize_page(unsigned long start, unsigned long range) 171899fdafdeSJason Cooper { 171999fdafdeSJason Cooper if (!PAGE_ALIGNED(start)) { 172099fdafdeSJason Cooper range -= PAGE_ALIGN(start) - start; 172199fdafdeSJason Cooper start = PAGE_ALIGN(start); 172299fdafdeSJason Cooper } 172399fdafdeSJason Cooper 172499fdafdeSJason Cooper if (start > ULONG_MAX - range) 172599fdafdeSJason Cooper range = ULONG_MAX - start; 172699fdafdeSJason Cooper 172799fdafdeSJason Cooper range >>= PAGE_SHIFT; 172899fdafdeSJason Cooper 172999fdafdeSJason Cooper if (range == 0) 173099fdafdeSJason Cooper return start; 173199fdafdeSJason Cooper 173299fdafdeSJason Cooper return start + (get_random_long() % range << PAGE_SHIFT); 173399fdafdeSJason Cooper } 173499fdafdeSJason Cooper 1735c84dbf61STorsten Duwe /* Interface for in-kernel drivers of true hardware RNGs. 1736c84dbf61STorsten Duwe * Those devices may produce endless random bits and will be throttled 1737c84dbf61STorsten Duwe * when our pool is full. 1738c84dbf61STorsten Duwe */ 1739c84dbf61STorsten Duwe void add_hwgenerator_randomness(const char *buffer, size_t count, 1740c84dbf61STorsten Duwe size_t entropy) 1741c84dbf61STorsten Duwe { 174243838a23STheodore Ts'o if (unlikely(crng_init == 0)) { 174373c7733fSJason A. Donenfeld size_t ret = crng_fast_load(buffer, count); 174490ed1e67SJason A. Donenfeld mix_pool_bytes(buffer, ret); 174573c7733fSJason A. Donenfeld count -= ret; 174673c7733fSJason A. Donenfeld buffer += ret; 174773c7733fSJason A. Donenfeld if (!count || crng_init == 0) 1748e192be9dSTheodore Ts'o return; 17493371f3daSTheodore Ts'o } 1750e192be9dSTheodore Ts'o 1751c321e907SDominik Brodowski /* Throttle writing if we're above the trickle threshold. 1752489c7fc4SJason A. Donenfeld * We'll be woken up again once below POOL_MIN_BITS, when 1753489c7fc4SJason A. Donenfeld * the calling thread is about to terminate, or once 1754489c7fc4SJason A. Donenfeld * CRNG_RESEED_INTERVAL has elapsed. 1755e192be9dSTheodore Ts'o */ 1756c321e907SDominik Brodowski wait_event_interruptible_timeout(random_write_wait, 1757f7e67b8eSDominik Brodowski !system_wq || kthread_should_stop() || 1758489c7fc4SJason A. Donenfeld input_pool.entropy_count < POOL_MIN_BITS, 1759c321e907SDominik Brodowski CRNG_RESEED_INTERVAL); 176090ed1e67SJason A. Donenfeld mix_pool_bytes(buffer, count); 176190ed1e67SJason A. Donenfeld credit_entropy_bits(entropy); 1762c84dbf61STorsten Duwe } 1763c84dbf61STorsten Duwe EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); 1764428826f5SHsin-Yi Wang
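/*
 * Editorial note -- illustrative sketch, not part of this file: the typical
 * caller of add_hwgenerator_randomness() is a filler kthread in a hardware
 * RNG driver (see drivers/char/hw_random). read_my_trng() and the
 * 8-bits-per-byte entropy claim are hypothetical; the call itself sleeps
 * when the input pool is above the threshold, which provides the throttling.
 */
#include <linux/kthread.h>
#include <linux/random.h>

static int my_trng_fill(void *unused)
{
	char buf[64];

	while (!kthread_should_stop()) {
		size_t len = read_my_trng(buf, sizeof(buf));	/* hypothetical device read */

		if (!len)
			continue;
		/* Credit full entropy only if the source is a trusted TRNG. */
		add_hwgenerator_randomness(buf, len, len * 8);
	}
	return 0;
}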
1765428826f5SHsin-Yi Wang /* Handle random seed passed by bootloader. 1766428826f5SHsin-Yi Wang * If the seed is trustworthy, it is treated like input from a hardware RNG. 1767428826f5SHsin-Yi Wang * Otherwise it is treated as device data. 1768428826f5SHsin-Yi Wang * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. 1769428826f5SHsin-Yi Wang */ 1770428826f5SHsin-Yi Wang void add_bootloader_randomness(const void *buf, unsigned int size) 1771428826f5SHsin-Yi Wang { 1772428826f5SHsin-Yi Wang if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER)) 1773428826f5SHsin-Yi Wang add_hwgenerator_randomness(buf, size, size * 8); 1774428826f5SHsin-Yi Wang else 1775428826f5SHsin-Yi Wang add_device_randomness(buf, size); 1776428826f5SHsin-Yi Wang } 1777428826f5SHsin-Yi Wang EXPORT_SYMBOL_GPL(add_bootloader_randomness); 1778
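/*
 * Editorial note -- illustrative sketch, not part of this file: the userspace
 * counterpart of the getrandom syscall implemented above, using the glibc
 * getrandom(3) wrapper. With flags == 0 it blocks until the CRNG is seeded;
 * GRND_NONBLOCK fails with EAGAIN instead, and GRND_INSECURE never blocks.
 */
#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char key[32];
	ssize_t n = getrandom(key, sizeof(key), 0);

	if (n != (ssize_t)sizeof(key)) {
		perror("getrandom");
		return 1;
	}
	printf("read %zd seeded random bytes\n", n);
	return 0;
}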