xref: /linux/drivers/char/random.c (revision 64276a9939ff414f2f0db38036cf4e1a0a703394)
1a07fdae3SJason A. Donenfeld // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
21da177e4SLinus Torvalds /*
39f9eff85SJason A. Donenfeld  * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
49e95ce27SMatt Mackall  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
55f75d9f3SJason A. Donenfeld  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
61da177e4SLinus Torvalds  *
75f75d9f3SJason A. Donenfeld  * This driver produces cryptographically secure pseudorandom data. It is divided
85f75d9f3SJason A. Donenfeld  * into roughly six sections, each with a section header:
91da177e4SLinus Torvalds  *
105f75d9f3SJason A. Donenfeld  *   - Initialization and readiness waiting.
115f75d9f3SJason A. Donenfeld  *   - Fast key erasure RNG, the "crng".
125f75d9f3SJason A. Donenfeld  *   - Entropy accumulation and extraction routines.
135f75d9f3SJason A. Donenfeld  *   - Entropy collection routines.
145f75d9f3SJason A. Donenfeld  *   - Userspace reader/writer interfaces.
155f75d9f3SJason A. Donenfeld  *   - Sysctl interface.
161da177e4SLinus Torvalds  *
175f75d9f3SJason A. Donenfeld  * The high level overview is that there is one input pool, into which
185f75d9f3SJason A. Donenfeld  * various pieces of data are hashed. Some of that data is then "credited" as
195f75d9f3SJason A. Donenfeld  * having a certain number of bits of entropy. When enough bits of entropy are
205f75d9f3SJason A. Donenfeld  * available, the hash is finalized and handed as a key to a stream cipher that
215f75d9f3SJason A. Donenfeld  * expands it indefinitely for various consumers. This key is periodically
225f75d9f3SJason A. Donenfeld  * refreshed as the various entropy collectors, described below, add data to the
235f75d9f3SJason A. Donenfeld  * input pool and credit it. There is currently no Fortuna-like scheduler
245f75d9f3SJason A. Donenfeld  * involved, which can lead to malicious entropy sources causing a premature
255f75d9f3SJason A. Donenfeld  * reseed, and the entropy estimates are, at best, conservative guesses.
261da177e4SLinus Torvalds  */
271da177e4SLinus Torvalds 
2812cd53afSYangtao Li #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2912cd53afSYangtao Li 
301da177e4SLinus Torvalds #include <linux/utsname.h>
311da177e4SLinus Torvalds #include <linux/module.h>
321da177e4SLinus Torvalds #include <linux/kernel.h>
331da177e4SLinus Torvalds #include <linux/major.h>
341da177e4SLinus Torvalds #include <linux/string.h>
351da177e4SLinus Torvalds #include <linux/fcntl.h>
361da177e4SLinus Torvalds #include <linux/slab.h>
371da177e4SLinus Torvalds #include <linux/random.h>
381da177e4SLinus Torvalds #include <linux/poll.h>
391da177e4SLinus Torvalds #include <linux/init.h>
401da177e4SLinus Torvalds #include <linux/fs.h>
411da177e4SLinus Torvalds #include <linux/genhd.h>
421da177e4SLinus Torvalds #include <linux/interrupt.h>
4327ac792cSAndrea Righi #include <linux/mm.h>
44dd0f0cf5SMichael Ellerman #include <linux/nodemask.h>
451da177e4SLinus Torvalds #include <linux/spinlock.h>
46c84dbf61STorsten Duwe #include <linux/kthread.h>
471da177e4SLinus Torvalds #include <linux/percpu.h>
48775f4b29STheodore Ts'o #include <linux/ptrace.h>
496265e169STheodore Ts'o #include <linux/workqueue.h>
50d178a1ebSYinghai Lu #include <linux/irq.h>
514e00b339STheodore Ts'o #include <linux/ratelimit.h>
52c6e9d6f3STheodore Ts'o #include <linux/syscalls.h>
53c6e9d6f3STheodore Ts'o #include <linux/completion.h>
548da4b8c4SAndy Shevchenko #include <linux/uuid.h>
5587e7d5abSJason A. Donenfeld #include <linux/uaccess.h>
561ca1b917SEric Biggers #include <crypto/chacha.h>
579f9eff85SJason A. Donenfeld #include <crypto/blake2s.h>
581da177e4SLinus Torvalds #include <asm/processor.h>
591da177e4SLinus Torvalds #include <asm/irq.h>
60775f4b29STheodore Ts'o #include <asm/irq_regs.h>
611da177e4SLinus Torvalds #include <asm/io.h>
621da177e4SLinus Torvalds 
635f1bb112SJason A. Donenfeld /*********************************************************************
645f1bb112SJason A. Donenfeld  *
655f1bb112SJason A. Donenfeld  * Initialization and readiness waiting.
665f1bb112SJason A. Donenfeld  *
675f1bb112SJason A. Donenfeld  * Much of the RNG infrastructure is devoted to various dependencies
685f1bb112SJason A. Donenfeld  * being able to wait until the RNG has collected enough entropy and
695f1bb112SJason A. Donenfeld  * is ready for safe consumption.
705f1bb112SJason A. Donenfeld  *
715f1bb112SJason A. Donenfeld  *********************************************************************/
725f1bb112SJason A. Donenfeld 
/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
/* "Ready" means fully seeded from the input pool (crng_init == 2). */
#define crng_ready() (likely(crng_init > 1))
/* Various types of waiters for crng_init->2 transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;	/* SIGIO/POLL_IN delivery on init done */
static DEFINE_SPINLOCK(random_ready_list_lock);
/* Callbacks to invoke once the crng becomes ready; see process_random_ready_list(). */
static LIST_HEAD(random_ready_list);

/* Control how we warn userspace. */
static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
static int ratelimit_disable __read_mostly;
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
975f1bb112SJason A. Donenfeld 
985f1bb112SJason A. Donenfeld /*
995f1bb112SJason A. Donenfeld  * Returns whether or not the input pool has been seeded and thus guaranteed
1005f1bb112SJason A. Donenfeld  * to supply cryptographically secure random numbers. This applies to: the
1015f1bb112SJason A. Donenfeld  * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
1025f1bb112SJason A. Donenfeld  * ,u64,int,long} family of functions.
1035f1bb112SJason A. Donenfeld  *
1045f1bb112SJason A. Donenfeld  * Returns: true if the input pool has been seeded.
1055f1bb112SJason A. Donenfeld  *          false if the input pool has not been seeded.
1065f1bb112SJason A. Donenfeld  */
1075f1bb112SJason A. Donenfeld bool rng_is_initialized(void)
1085f1bb112SJason A. Donenfeld {
1095f1bb112SJason A. Donenfeld 	return crng_ready();
1105f1bb112SJason A. Donenfeld }
1115f1bb112SJason A. Donenfeld EXPORT_SYMBOL(rng_is_initialized);
1125f1bb112SJason A. Donenfeld 
1135f1bb112SJason A. Donenfeld /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
1145f1bb112SJason A. Donenfeld static void try_to_generate_entropy(void);
1155f1bb112SJason A. Donenfeld 
1165f1bb112SJason A. Donenfeld /*
1175f1bb112SJason A. Donenfeld  * Wait for the input pool to be seeded and thus guaranteed to supply
1185f1bb112SJason A. Donenfeld  * cryptographically secure random numbers. This applies to: the /dev/urandom
1195f1bb112SJason A. Donenfeld  * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
1205f1bb112SJason A. Donenfeld  * family of functions. Using any of these functions without first calling
1215f1bb112SJason A. Donenfeld  * this function forfeits the guarantee of security.
1225f1bb112SJason A. Donenfeld  *
1235f1bb112SJason A. Donenfeld  * Returns: 0 if the input pool has been seeded.
1245f1bb112SJason A. Donenfeld  *          -ERESTARTSYS if the function was interrupted by a signal.
1255f1bb112SJason A. Donenfeld  */
1265f1bb112SJason A. Donenfeld int wait_for_random_bytes(void)
1275f1bb112SJason A. Donenfeld {
1285f1bb112SJason A. Donenfeld 	if (likely(crng_ready()))
1295f1bb112SJason A. Donenfeld 		return 0;
1305f1bb112SJason A. Donenfeld 
1315f1bb112SJason A. Donenfeld 	do {
1325f1bb112SJason A. Donenfeld 		int ret;
1335f1bb112SJason A. Donenfeld 		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
1345f1bb112SJason A. Donenfeld 		if (ret)
1355f1bb112SJason A. Donenfeld 			return ret > 0 ? 0 : ret;
1365f1bb112SJason A. Donenfeld 
1375f1bb112SJason A. Donenfeld 		try_to_generate_entropy();
1385f1bb112SJason A. Donenfeld 	} while (!crng_ready());
1395f1bb112SJason A. Donenfeld 
1405f1bb112SJason A. Donenfeld 	return 0;
1415f1bb112SJason A. Donenfeld }
1425f1bb112SJason A. Donenfeld EXPORT_SYMBOL(wait_for_random_bytes);
1435f1bb112SJason A. Donenfeld 
1445f1bb112SJason A. Donenfeld /*
1455f1bb112SJason A. Donenfeld  * Add a callback function that will be invoked when the input
1465f1bb112SJason A. Donenfeld  * pool is initialised.
1475f1bb112SJason A. Donenfeld  *
1485f1bb112SJason A. Donenfeld  * returns: 0 if callback is successfully added
1495f1bb112SJason A. Donenfeld  *	    -EALREADY if pool is already initialised (callback not called)
1505f1bb112SJason A. Donenfeld  *	    -ENOENT if module for callback is not alive
1515f1bb112SJason A. Donenfeld  */
1525f1bb112SJason A. Donenfeld int add_random_ready_callback(struct random_ready_callback *rdy)
1535f1bb112SJason A. Donenfeld {
1545f1bb112SJason A. Donenfeld 	struct module *owner;
1555f1bb112SJason A. Donenfeld 	unsigned long flags;
1565f1bb112SJason A. Donenfeld 	int err = -EALREADY;
1575f1bb112SJason A. Donenfeld 
1585f1bb112SJason A. Donenfeld 	if (crng_ready())
1595f1bb112SJason A. Donenfeld 		return err;
1605f1bb112SJason A. Donenfeld 
1615f1bb112SJason A. Donenfeld 	owner = rdy->owner;
1625f1bb112SJason A. Donenfeld 	if (!try_module_get(owner))
1635f1bb112SJason A. Donenfeld 		return -ENOENT;
1645f1bb112SJason A. Donenfeld 
1655f1bb112SJason A. Donenfeld 	spin_lock_irqsave(&random_ready_list_lock, flags);
1665f1bb112SJason A. Donenfeld 	if (crng_ready())
1675f1bb112SJason A. Donenfeld 		goto out;
1685f1bb112SJason A. Donenfeld 
1695f1bb112SJason A. Donenfeld 	owner = NULL;
1705f1bb112SJason A. Donenfeld 
1715f1bb112SJason A. Donenfeld 	list_add(&rdy->list, &random_ready_list);
1725f1bb112SJason A. Donenfeld 	err = 0;
1735f1bb112SJason A. Donenfeld 
1745f1bb112SJason A. Donenfeld out:
1755f1bb112SJason A. Donenfeld 	spin_unlock_irqrestore(&random_ready_list_lock, flags);
1765f1bb112SJason A. Donenfeld 
1775f1bb112SJason A. Donenfeld 	module_put(owner);
1785f1bb112SJason A. Donenfeld 
1795f1bb112SJason A. Donenfeld 	return err;
1805f1bb112SJason A. Donenfeld }
1815f1bb112SJason A. Donenfeld EXPORT_SYMBOL(add_random_ready_callback);
1825f1bb112SJason A. Donenfeld 
1835f1bb112SJason A. Donenfeld /*
1845f1bb112SJason A. Donenfeld  * Delete a previously registered readiness callback function.
1855f1bb112SJason A. Donenfeld  */
1865f1bb112SJason A. Donenfeld void del_random_ready_callback(struct random_ready_callback *rdy)
1875f1bb112SJason A. Donenfeld {
1885f1bb112SJason A. Donenfeld 	unsigned long flags;
1895f1bb112SJason A. Donenfeld 	struct module *owner = NULL;
1905f1bb112SJason A. Donenfeld 
1915f1bb112SJason A. Donenfeld 	spin_lock_irqsave(&random_ready_list_lock, flags);
1925f1bb112SJason A. Donenfeld 	if (!list_empty(&rdy->list)) {
1935f1bb112SJason A. Donenfeld 		list_del_init(&rdy->list);
1945f1bb112SJason A. Donenfeld 		owner = rdy->owner;
1955f1bb112SJason A. Donenfeld 	}
1965f1bb112SJason A. Donenfeld 	spin_unlock_irqrestore(&random_ready_list_lock, flags);
1975f1bb112SJason A. Donenfeld 
1985f1bb112SJason A. Donenfeld 	module_put(owner);
1995f1bb112SJason A. Donenfeld }
2005f1bb112SJason A. Donenfeld EXPORT_SYMBOL(del_random_ready_callback);
2015f1bb112SJason A. Donenfeld 
2025f1bb112SJason A. Donenfeld static void process_random_ready_list(void)
2035f1bb112SJason A. Donenfeld {
2045f1bb112SJason A. Donenfeld 	unsigned long flags;
2055f1bb112SJason A. Donenfeld 	struct random_ready_callback *rdy, *tmp;
2065f1bb112SJason A. Donenfeld 
2075f1bb112SJason A. Donenfeld 	spin_lock_irqsave(&random_ready_list_lock, flags);
2085f1bb112SJason A. Donenfeld 	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
2095f1bb112SJason A. Donenfeld 		struct module *owner = rdy->owner;
2105f1bb112SJason A. Donenfeld 
2115f1bb112SJason A. Donenfeld 		list_del_init(&rdy->list);
2125f1bb112SJason A. Donenfeld 		rdy->func(rdy);
2135f1bb112SJason A. Donenfeld 		module_put(owner);
2145f1bb112SJason A. Donenfeld 	}
2155f1bb112SJason A. Donenfeld 	spin_unlock_irqrestore(&random_ready_list_lock, flags);
2165f1bb112SJason A. Donenfeld }
2175f1bb112SJason A. Donenfeld 
/*
 * Warn (ratelimited) when randomness is requested before the crng is ready.
 * The macro captures the calling function and return address for the report.
 */
#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	/* Never latch: report every offending call site. */
	const bool print_once = false;
#else
	/* Latch after the first report to avoid log spam. */
	static bool print_once __read_mostly;
#endif

	/* Skip if already reported, the crng is ready, or same caller as last time. */
	if (print_once || crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
				func_name, caller, crng_init);
}
2405f1bb112SJason A. Donenfeld 
2415f1bb112SJason A. Donenfeld 
2423655adc7SJason A. Donenfeld /*********************************************************************
2433655adc7SJason A. Donenfeld  *
2443655adc7SJason A. Donenfeld  * Fast key erasure RNG, the "crng".
2453655adc7SJason A. Donenfeld  *
2463655adc7SJason A. Donenfeld  * These functions expand entropy from the entropy extractor into
2473655adc7SJason A. Donenfeld  * long streams for external consumption using the "fast key erasure"
2483655adc7SJason A. Donenfeld  * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
2493655adc7SJason A. Donenfeld  *
2503655adc7SJason A. Donenfeld  * There are a few exported interfaces for use by other drivers:
2513655adc7SJason A. Donenfeld  *
2523655adc7SJason A. Donenfeld  *	void get_random_bytes(void *buf, size_t nbytes)
2533655adc7SJason A. Donenfeld  *	u32 get_random_u32()
2543655adc7SJason A. Donenfeld  *	u64 get_random_u64()
2553655adc7SJason A. Donenfeld  *	unsigned int get_random_int()
2563655adc7SJason A. Donenfeld  *	unsigned long get_random_long()
2573655adc7SJason A. Donenfeld  *
2583655adc7SJason A. Donenfeld  * These interfaces will return the requested number of random bytes
2593655adc7SJason A. Donenfeld  * into the given buffer or as a return value. This is equivalent to
2603655adc7SJason A. Donenfeld  * a read from /dev/urandom. The integer family of functions may be
2613655adc7SJason A. Donenfeld  * higher performance for one-off random integers, because they do a
2623655adc7SJason A. Donenfeld  * bit of buffering.
2633655adc7SJason A. Donenfeld  *
2643655adc7SJason A. Donenfeld  *********************************************************************/
2653655adc7SJason A. Donenfeld 
enum {
	CRNG_RESEED_INTERVAL = 300 * HZ,	/* reseed base_crng after 5 minutes */
	CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE	/* pre-init bytes before crng_init -> 1 */
};

/*
 * The root key material. crng_reseed() refreshes .key from the input pool
 * and bumps .generation; per-cpu crngs re-derive their keys from it when
 * their generation falls behind. All fields are protected by .lock.
 */
static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;		/* jiffies of the most recent reseed */
	unsigned long generation;	/* bumped per reseed; skips ULONG_MAX */
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

/* Per-cpu fast-key-erasure state, derived from base_crng. */
struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;	/* mirrors base_crng.generation when current */
	local_lock_t lock;
};

/*
 * Generation starts at ULONG_MAX — a value base_crng.generation never takes —
 * so every CPU (including ones that come online later) pulls a fresh key on
 * first use.
 */
static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

/* Used by crng_reseed() to extract a new seed from the input pool. */
static bool drain_entropy(void *buf, size_t nbytes);
2933655adc7SJason A. Donenfeld 
/*
 * This extracts a new crng key from the input pool, but only if there is a
 * sufficient amount of entropy available, in order to mitigate bruteforcing
 * of newly added bits.
 */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];
	bool finalize_init = false;

	/* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
	if (!drain_entropy(key, sizeof(key)))
		return;

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	/* First successful reseed: transition crng_init to 2 ("ready"). */
	if (crng_init < 2) {
		crng_init = 2;
		finalize_init = true;
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
	/* Wipe the stack copy of the new key. */
	memzero_explicit(key, sizeof(key));
	/* Outside the lock: wake everything waiting on the readiness transition. */
	if (finalize_init) {
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		/* Report any warnings that ratelimiting suppressed before readiness. */
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}
3463655adc7SJason A. Donenfeld 
/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	/* Words 4-11 hold the 256-bit key; words 12-15 (counter/nonce) start at zero. */
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	/* The first half of the block replaces the key ("fast key erasure"). */
	memcpy(key, first_block, CHACHA_KEY_SIZE);
	/* The second half is handed out as fresh random bytes. */
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}
3713655adc7SJason A. Donenfeld 
/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, because
	 * this is what crng_pre_init_inject() mutates during early init.
	 */
	if (unlikely(!crng_ready())) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready)
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		spin_unlock_irqrestore(&base_crng.lock, flags);
		/* If we became ready while waiting for the lock, fall through below. */
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is more than 5 minutes old, we reseed, which
	 * in turn bumps the generation counter that we check below.
	 */
	if (unlikely(time_after(jiffies, READ_ONCE(base_crng.birth) + CRNG_RESEED_INTERVAL)))
		crng_reseed();

	/* Pin this CPU's crng; local_lock also disables irqs on this CPU. */
	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}
4383655adc7SJason A. Donenfeld 
/*
 * This function is for crng_init == 0 only. It loads entropy directly
 * into the crng's key, without going through the input pool. It is,
 * generally speaking, not very safe, but we use this only at early
 * boot time when it's better to have something there rather than
 * nothing.
 *
 * There are two paths, a slow one and a fast one. The slow one
 * hashes the input along with the current key. The fast one simply
 * xors it in, and should only be used from interrupt context.
 *
 * If account is set, then the crng_init_cnt counter is incremented.
 * This shouldn't be set by functions like add_device_randomness(),
 * where we can't trust the buffer passed to it is guaranteed to be
 * unpredictable (so it might not have any entropy at all).
 *
 * Returns the number of bytes processed from input, which is bounded
 * by CRNG_INIT_CNT_THRESH if account is true.
 */
static size_t crng_pre_init_inject(const void *input, size_t len,
				   bool fast, bool account)
{
	/* Bytes of trusted input mixed in so far; guarded by base_crng.lock. */
	static int crng_init_cnt = 0;
	unsigned long flags;

	if (fast) {
		/* Interrupt context: never spin on the lock, just drop the input. */
		if (!spin_trylock_irqsave(&base_crng.lock, flags))
			return 0;
	} else {
		spin_lock_irqsave(&base_crng.lock, flags);
	}

	/* Someone else already moved past the pre-init phase; nothing to do. */
	if (crng_init != 0) {
		spin_unlock_irqrestore(&base_crng.lock, flags);
		return 0;
	}

	if (account)
		len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);

	if (fast) {
		/* Fast path: xor the input into the key, wrapping around it. */
		const u8 *src = input;
		size_t i;

		for (i = 0; i < len; ++i)
			base_crng.key[(crng_init_cnt + i) %
				      sizeof(base_crng.key)] ^= src[i];
	} else {
		/* Slow path: hash the old key together with the input. */
		struct blake2s_state hash;

		blake2s_init(&hash, sizeof(base_crng.key));
		blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
		blake2s_update(&hash, input, len);
		blake2s_final(&hash, base_crng.key);
	}

	if (account) {
		crng_init_cnt += len;
		/* Enough trusted bytes accumulated: advance to crng_init == 1. */
		if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
			++base_crng.generation;
			crng_init = 1;
		}
	}

	spin_unlock_irqrestore(&base_crng.lock, flags);

	if (crng_init == 1)
		pr_notice("fast init done\n");

	return len;
}
5103655adc7SJason A. Donenfeld 
/*
 * Fill @buf with @nbytes of cryptographically secure random bytes.
 *
 * The first min(32, nbytes) bytes are produced via crng_make_state(),
 * which also initializes the detached ChaCha state in chacha_state;
 * any remaining bytes are generated block-by-block from that state.
 */
static void _get_random_bytes(void *buf, size_t nbytes)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t len;

	if (!nbytes)
		return;

	/* Up to the first 32 bytes come straight out of fast key erasure. */
	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, buf, len);
	nbytes -= len;
	buf += len;

	while (nbytes) {
		if (nbytes < CHACHA_BLOCK_SIZE) {
			/* Partial final block: generate into tmp, copy, wipe. */
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, nbytes);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		/* Word 12 is the low 32-bit block counter; carry on wrap. */
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		nbytes -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	/* Don't leave key material behind on the stack. */
	memzero_explicit(chacha_state, sizeof(chacha_state));
}
5423655adc7SJason A. Donenfeld 
/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, size_t nbytes)
{
	/* Per-call-site state for warn_unseeded_randomness() ratelimiting. */
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
5613655adc7SJason A. Donenfeld 
/*
 * Copy up to @nbytes of random bytes to the userspace buffer @buf.
 * Returns the number of bytes copied — possibly short if a signal
 * becomes pending during a large request — or -EFAULT on a bad user
 * pointer.
 */
static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
	/* For big reads, periodically check for resched/signals below. */
	bool large_request = nbytes > 256;
	ssize_t ret = 0;
	size_t len;
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 output[CHACHA_BLOCK_SIZE];

	if (!nbytes)
		return 0;

	/*
	 * Up to the first 32 bytes come from fast key erasure, which also
	 * initializes the detached ChaCha state used for the remainder.
	 */
	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, output, len);

	if (copy_to_user(buf, output, len))
		return -EFAULT;
	nbytes -= len;
	buf += len;
	ret += len;

	while (nbytes) {
		if (large_request && need_resched()) {
			/* Return a short read rather than ignore a signal. */
			if (signal_pending(current))
				break;
			schedule();
		}

		chacha20_block(chacha_state, output);
		/* Word 12 is the low 32-bit block counter; carry on wrap. */
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, output, len)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= len;
		buf += len;
		ret += len;
	}

	/* Wipe key material and output scratch from the stack. */
	memzero_explicit(chacha_state, sizeof(chacha_state));
	memzero_explicit(output, sizeof(output));
	return ret;
}
6083655adc7SJason A. Donenfeld 
/*
 * Batched entropy returns random integers. The quality of the random
 * number is good as /dev/urandom. In order to ensure that the randomness
 * provided by this function is okay, the function wait_for_random_bytes()
 * should be called and return 0 at least once at any point prior.
 */
struct batched_entropy {
	union {
		/*
		 * We make this 1.5x a ChaCha block, so that we get the
		 * remaining 32 bytes from fast key erasure, plus one full
		 * block from the detached ChaCha state. We can increase
		 * the size of this later if needed so long as we keep the
		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
		 */
		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
	};
	local_lock_t lock;		/* protects this CPU's batch */
	unsigned long generation;	/* base_crng.generation when the batch was filled */
	unsigned int position;		/* next unconsumed index; >= array size means empty */
};
6313655adc7SJason A. Donenfeld 
6323655adc7SJason A. Donenfeld 
/* Per-cpu u64 batch; position = UINT_MAX forces a refill on first use. */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
	.position = UINT_MAX
};
6373655adc7SJason A. Donenfeld 
/* Return a random u64 from the per-cpu batch, refilling it as needed. */
u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;	/* ratelimit state for the unseeded warning */
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u64.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u64);

	/*
	 * Refill when the batch is exhausted or when base_crng has been
	 * reseeded since the batch was generated (generation mismatch),
	 * so stale pre-reseed output is never handed out.
	 */
	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
		batch->position = 0;
		batch->generation = next_gen;
	}

	/* Zero each value after consumption so it can't be read back later. */
	ret = batch->entropy_u64[batch->position];
	batch->entropy_u64[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);
6663655adc7SJason A. Donenfeld 
/* Per-cpu u32 batch; position = UINT_MAX forces a refill on first use. */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
	.position = UINT_MAX
};
6713655adc7SJason A. Donenfeld 
/* Return a random u32 from the per-cpu batch; mirrors get_random_u64(). */
u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;	/* ratelimit state for the unseeded warning */
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u32.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u32);

	/*
	 * Refill when the batch is exhausted or when base_crng has been
	 * reseeded since the batch was generated (generation mismatch),
	 * so stale pre-reseed output is never handed out.
	 */
	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
		batch->position = 0;
		batch->generation = next_gen;
	}

	/* Zero each value after consumption so it can't be read back later. */
	ret = batch->entropy_u32[batch->position];
	batch->entropy_u32[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
7003655adc7SJason A. Donenfeld 
#ifdef CONFIG_SMP
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate both
	 * the per-cpu crng and all batches, so that we serve fresh
	 * randomness.
	 *
	 * ULONG_MAX / UINT_MAX are the same sentinels the static
	 * initializers use to mean "stale, regenerate on first use".
	 */
	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
	return 0;
}
#endif
7193191dd5aSJason A. Donenfeld 
7203655adc7SJason A. Donenfeld /**
7213655adc7SJason A. Donenfeld  * randomize_page - Generate a random, page aligned address
7223655adc7SJason A. Donenfeld  * @start:	The smallest acceptable address the caller will take.
7233655adc7SJason A. Donenfeld  * @range:	The size of the area, starting at @start, within which the
7243655adc7SJason A. Donenfeld  *		random address must fall.
7253655adc7SJason A. Donenfeld  *
7263655adc7SJason A. Donenfeld  * If @start + @range would overflow, @range is capped.
7273655adc7SJason A. Donenfeld  *
7283655adc7SJason A. Donenfeld  * NOTE: Historical use of randomize_range, which this replaces, presumed that
7293655adc7SJason A. Donenfeld  * @start was already page aligned.  We now align it regardless.
7303655adc7SJason A. Donenfeld  *
7313655adc7SJason A. Donenfeld  * Return: A page aligned address within [start, start + range).  On error,
7323655adc7SJason A. Donenfeld  * @start is returned.
7333655adc7SJason A. Donenfeld  */
7343655adc7SJason A. Donenfeld unsigned long randomize_page(unsigned long start, unsigned long range)
7353655adc7SJason A. Donenfeld {
7363655adc7SJason A. Donenfeld 	if (!PAGE_ALIGNED(start)) {
7373655adc7SJason A. Donenfeld 		range -= PAGE_ALIGN(start) - start;
7383655adc7SJason A. Donenfeld 		start = PAGE_ALIGN(start);
7393655adc7SJason A. Donenfeld 	}
7403655adc7SJason A. Donenfeld 
7413655adc7SJason A. Donenfeld 	if (start > ULONG_MAX - range)
7423655adc7SJason A. Donenfeld 		range = ULONG_MAX - start;
7433655adc7SJason A. Donenfeld 
7443655adc7SJason A. Donenfeld 	range >>= PAGE_SHIFT;
7453655adc7SJason A. Donenfeld 
7463655adc7SJason A. Donenfeld 	if (range == 0)
7473655adc7SJason A. Donenfeld 		return start;
7483655adc7SJason A. Donenfeld 
7493655adc7SJason A. Donenfeld 	return start + (get_random_long() % range << PAGE_SHIFT);
7503655adc7SJason A. Donenfeld }
7513655adc7SJason A. Donenfeld 
7523655adc7SJason A. Donenfeld /*
7533655adc7SJason A. Donenfeld  * This function will use the architecture-specific hardware random
7543655adc7SJason A. Donenfeld  * number generator if it is available. It is not recommended for
7553655adc7SJason A. Donenfeld  * use. Use get_random_bytes() instead. It returns the number of
7563655adc7SJason A. Donenfeld  * bytes filled in.
7573655adc7SJason A. Donenfeld  */
7583655adc7SJason A. Donenfeld size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
7593655adc7SJason A. Donenfeld {
7603655adc7SJason A. Donenfeld 	size_t left = nbytes;
7613655adc7SJason A. Donenfeld 	u8 *p = buf;
7623655adc7SJason A. Donenfeld 
7633655adc7SJason A. Donenfeld 	while (left) {
7643655adc7SJason A. Donenfeld 		unsigned long v;
7653655adc7SJason A. Donenfeld 		size_t chunk = min_t(size_t, left, sizeof(unsigned long));
7663655adc7SJason A. Donenfeld 
7673655adc7SJason A. Donenfeld 		if (!arch_get_random_long(&v))
7683655adc7SJason A. Donenfeld 			break;
7693655adc7SJason A. Donenfeld 
7703655adc7SJason A. Donenfeld 		memcpy(p, &v, chunk);
7713655adc7SJason A. Donenfeld 		p += chunk;
7723655adc7SJason A. Donenfeld 		left -= chunk;
7733655adc7SJason A. Donenfeld 	}
7743655adc7SJason A. Donenfeld 
7753655adc7SJason A. Donenfeld 	return nbytes - left;
7763655adc7SJason A. Donenfeld }
7773655adc7SJason A. Donenfeld EXPORT_SYMBOL(get_random_bytes_arch);
7783655adc7SJason A. Donenfeld 
779a5ed7cb1SJason A. Donenfeld 
780a5ed7cb1SJason A. Donenfeld /**********************************************************************
781a5ed7cb1SJason A. Donenfeld  *
782a5ed7cb1SJason A. Donenfeld  * Entropy accumulation and extraction routines.
783a5ed7cb1SJason A. Donenfeld  *
784a5ed7cb1SJason A. Donenfeld  * Callers may add entropy via:
785a5ed7cb1SJason A. Donenfeld  *
786a5ed7cb1SJason A. Donenfeld  *     static void mix_pool_bytes(const void *in, size_t nbytes)
787a5ed7cb1SJason A. Donenfeld  *
788a5ed7cb1SJason A. Donenfeld  * After which, if added entropy should be credited:
789a5ed7cb1SJason A. Donenfeld  *
790a5ed7cb1SJason A. Donenfeld  *     static void credit_entropy_bits(size_t nbits)
791a5ed7cb1SJason A. Donenfeld  *
792a5ed7cb1SJason A. Donenfeld  * Finally, extract entropy via these two, with the latter one
793a5ed7cb1SJason A. Donenfeld  * setting the entropy count to zero and extracting only if there
794a5ed7cb1SJason A. Donenfeld  * is POOL_MIN_BITS entropy credited prior:
795a5ed7cb1SJason A. Donenfeld  *
796a5ed7cb1SJason A. Donenfeld  *     static void extract_entropy(void *buf, size_t nbytes)
797a5ed7cb1SJason A. Donenfeld  *     static bool drain_entropy(void *buf, size_t nbytes)
798a5ed7cb1SJason A. Donenfeld  *
799a5ed7cb1SJason A. Donenfeld  **********************************************************************/
800a5ed7cb1SJason A. Donenfeld 
/* The pool is a BLAKE2s hash state, so it holds one hash worth of bits. */
enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
};

/* For notifying userspace should write into /dev/random. */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);

static struct {
	struct blake2s_state hash;	/* running BLAKE2s over all mixed-in bytes */
	spinlock_t lock;		/* serializes updates to hash */
	unsigned int entropy_count;	/* bits credited; updated locklessly via cmpxchg */
} input_pool = {
	/* Compile-time equivalent of blake2s_init(&hash, BLAKE2S_HASH_SIZE). */
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
8201da177e4SLinus Torvalds 
/*
 * Absorb @nbytes of @in into the pool's BLAKE2s state. Callers
 * serialize via input_pool.lock; lockless use appears confined to
 * early boot (see rand_initialize()).
 */
static void _mix_pool_bytes(const void *in, size_t nbytes)
{
	blake2s_update(&input_pool.hash, in, nbytes);
}
82590ed1e67SJason A. Donenfeld 
/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *in, size_t nbytes)
{
	unsigned long flags;

	/* Serialize against other writers to the pool hash state. */
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(in, nbytes);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
8391da177e4SLinus Torvalds 
/* Credit @nbits of entropy to the pool, saturating at POOL_BITS. */
static void credit_entropy_bits(size_t nbits)
{
	unsigned int entropy_count, orig, add;

	if (!nbits)
		return;

	add = min_t(size_t, nbits, POOL_BITS);

	/* Lockless saturating add: retry the cmpxchg until it sticks. */
	do {
		orig = READ_ONCE(input_pool.entropy_count);
		entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);

	/* Once the pool fills before full init, reseed to finish crng init. */
	if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
		crng_reseed();
}
857a5ed7cb1SJason A. Donenfeld 
/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t nbytes)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];	/* 32 bytes of arch RNG output */
		size_t counter;					/* block index for expansion */
	} block;
	size_t i;

	/* Gather arch RNG words, falling back to a cycle counter per word. */
	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	/* Re-key the pool so past output cannot be recomputed from it. */
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	/* Expand: one keyed hash per output block, bumping the counter. */
	while (nbytes) {
		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		nbytes -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}
903a5ed7cb1SJason A. Donenfeld 
/*
 * First we make sure we have POOL_MIN_BITS of entropy in the pool, and then we
 * set the entropy count to zero (but don't actually touch any data). Only then
 * can we extract a new key with extract_entropy().
 */
static bool drain_entropy(void *buf, size_t nbytes)
{
	unsigned int entropy_count;
	/* Atomically claim the full entropy credit, or bail if we're short. */
	do {
		entropy_count = READ_ONCE(input_pool.entropy_count);
		if (entropy_count < POOL_MIN_BITS)
			return false;
	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
	extract_entropy(buf, nbytes);
	/* The pool is empty again: tell /dev/random writers to refill it. */
	wake_up_interruptible(&random_write_wait);
	kill_fasync(&fasync, SIGIO, POLL_OUT);
	return true;
}
922a5ed7cb1SJason A. Donenfeld 
92392c653cfSJason A. Donenfeld 
92492c653cfSJason A. Donenfeld /**********************************************************************
92592c653cfSJason A. Donenfeld  *
92692c653cfSJason A. Donenfeld  * Entropy collection routines.
92792c653cfSJason A. Donenfeld  *
92892c653cfSJason A. Donenfeld  * The following exported functions are used for pushing entropy into
92992c653cfSJason A. Donenfeld  * the above entropy accumulation routines:
93092c653cfSJason A. Donenfeld  *
93192c653cfSJason A. Donenfeld  *	void add_device_randomness(const void *buf, size_t size);
93292c653cfSJason A. Donenfeld  *	void add_input_randomness(unsigned int type, unsigned int code,
93392c653cfSJason A. Donenfeld  *	                          unsigned int value);
93492c653cfSJason A. Donenfeld  *	void add_disk_randomness(struct gendisk *disk);
93592c653cfSJason A. Donenfeld  *	void add_hwgenerator_randomness(const void *buffer, size_t count,
93692c653cfSJason A. Donenfeld  *					size_t entropy);
93792c653cfSJason A. Donenfeld  *	void add_bootloader_randomness(const void *buf, size_t size);
93892c653cfSJason A. Donenfeld  *	void add_interrupt_randomness(int irq);
93992c653cfSJason A. Donenfeld  *
94092c653cfSJason A. Donenfeld  * add_device_randomness() adds data to the input pool that
94192c653cfSJason A. Donenfeld  * is likely to differ between two devices (or possibly even per boot).
94292c653cfSJason A. Donenfeld  * This would be things like MAC addresses or serial numbers, or the
94392c653cfSJason A. Donenfeld  * read-out of the RTC. This does *not* credit any actual entropy to
94492c653cfSJason A. Donenfeld  * the pool, but it initializes the pool to different values for devices
94592c653cfSJason A. Donenfeld  * that might otherwise be identical and have very little entropy
94692c653cfSJason A. Donenfeld  * available to them (particularly common in the embedded world).
94792c653cfSJason A. Donenfeld  *
94892c653cfSJason A. Donenfeld  * add_input_randomness() uses the input layer interrupt timing, as well
94992c653cfSJason A. Donenfeld  * as the event type information from the hardware.
95092c653cfSJason A. Donenfeld  *
95192c653cfSJason A. Donenfeld  * add_disk_randomness() uses what amounts to the seek time of block
95292c653cfSJason A. Donenfeld  * layer request events, on a per-disk_devt basis, as input to the
95392c653cfSJason A. Donenfeld  * entropy pool. Note that high-speed solid state drives with very low
95492c653cfSJason A. Donenfeld  * seek times do not make for good sources of entropy, as their seek
95592c653cfSJason A. Donenfeld  * times are usually fairly consistent.
95692c653cfSJason A. Donenfeld  *
95792c653cfSJason A. Donenfeld  * The above two routines try to estimate how many bits of entropy
95892c653cfSJason A. Donenfeld  * to credit. They do this by keeping track of the first and second
95992c653cfSJason A. Donenfeld  * order deltas of the event timings.
96092c653cfSJason A. Donenfeld  *
96192c653cfSJason A. Donenfeld  * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
96292c653cfSJason A. Donenfeld  * entropy as specified by the caller. If the entropy pool is full it will
96392c653cfSJason A. Donenfeld  * block until more entropy is needed.
96492c653cfSJason A. Donenfeld  *
96592c653cfSJason A. Donenfeld  * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
96692c653cfSJason A. Donenfeld  * add_device_randomness(), depending on whether or not the configuration
96792c653cfSJason A. Donenfeld  * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
96892c653cfSJason A. Donenfeld  *
96992c653cfSJason A. Donenfeld  * add_interrupt_randomness() uses the interrupt timing as random
97092c653cfSJason A. Donenfeld  * inputs to the entropy pool. Using the cycle counters and the irq source
97192c653cfSJason A. Donenfeld  * as inputs, it feeds the input pool roughly once a second or after 64
97292c653cfSJason A. Donenfeld  * interrupts, crediting 1 bit of entropy for whichever comes first.
97392c653cfSJason A. Donenfeld  *
97492c653cfSJason A. Donenfeld  **********************************************************************/
97592c653cfSJason A. Donenfeld 
/* Whether arch RNG output alone may fully initialize the crng (random.trust_cpu=). */
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);
982775f4b29STheodore Ts'o 
/*
 * The first collection of entropy occurs at system boot while interrupts
 * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
 * Depending on the above configuration knob, RDSEED may be considered
 * sufficient for initialization. Note that much earlier setup may already
 * have pushed entropy into the input pool by the time we get here.
 */
int __init rand_initialize(void)
{
	size_t i;
	ktime_t now = ktime_get_real();
	bool arch_init = true;	/* stays true only if every word came from the arch RNG */
	unsigned long rv;

	/* Mix in one BLAKE2s block's worth of arch RNG (or fallback) words. */
	for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			/* No arch RNG available: fall back to a cycle counter. */
			rv = random_get_entropy();
			arch_init = false;
		}
		_mix_pool_bytes(&rv, sizeof(rv));
	}
	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(utsname(), sizeof(*(utsname())));

	/* Derive an initial base_crng key and invalidate stale per-cpu keys. */
	extract_entropy(base_crng.key, sizeof(base_crng.key));
	++base_crng.generation;

	/* With random.trust_cpu, a fully arch-sourced seed completes init. */
	if (arch_init && trust_cpu && crng_init < 2) {
		crng_init = 2;
		pr_notice("crng init done (trusting CPU's manufacturer)\n");
	}

	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}
10221da177e4SLinus Torvalds 
/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;		/* cycle timestamp of the previous event */
	long last_delta, last_delta2;	/* first- and second-order timing deltas */
};
10281da177e4SLinus Torvalds 
/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	/*
	 * Before the crng is even minimally seeded, also feed the data to
	 * its pre-init state (presumably uncredited — see
	 * crng_pre_init_inject() for the flag semantics).
	 */
	if (crng_init == 0 && size)
		crng_pre_init_inject(buf, size, false, false);

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, size);
	_mix_pool_bytes(&time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
1051a2080a67SLinus Torvalds 
10521da177e4SLinus Torvalds /*
10531da177e4SLinus Torvalds  * This function adds entropy to the entropy "pool" by using timing
10541da177e4SLinus Torvalds  * delays.  It uses the timer_rand_state structure to make an estimate
10551da177e4SLinus Torvalds  * of how many bits of entropy this call has added to the pool.
10561da177e4SLinus Torvalds  *
10571da177e4SLinus Torvalds  * The number "num" is also added to the pool - it should somehow describe
10581da177e4SLinus Torvalds  * the type of event which just happened.  This is currently 0-255 for
10591da177e4SLinus Torvalds  * keyboard scan codes, and 256 upwards for interrupts.
10601da177e4SLinus Torvalds  *
10611da177e4SLinus Torvalds  */
106204ec96b7SJason A. Donenfeld static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
10631da177e4SLinus Torvalds {
10641da177e4SLinus Torvalds 	struct {
10651da177e4SLinus Torvalds 		long jiffies;
1066d38bb085SJason A. Donenfeld 		unsigned int cycles;
1067d38bb085SJason A. Donenfeld 		unsigned int num;
10681da177e4SLinus Torvalds 	} sample;
10691da177e4SLinus Torvalds 	long delta, delta2, delta3;
10701da177e4SLinus Torvalds 
10711da177e4SLinus Torvalds 	sample.jiffies = jiffies;
107261875f30STheodore Ts'o 	sample.cycles = random_get_entropy();
10731da177e4SLinus Torvalds 	sample.num = num;
107490ed1e67SJason A. Donenfeld 	mix_pool_bytes(&sample, sizeof(sample));
10751da177e4SLinus Torvalds 
10761da177e4SLinus Torvalds 	/*
10771da177e4SLinus Torvalds 	 * Calculate number of bits of randomness we probably added.
10781da177e4SLinus Torvalds 	 * We take into account the first, second and third-order deltas
10791da177e4SLinus Torvalds 	 * in order to make our estimate.
10801da177e4SLinus Torvalds 	 */
1081e00d996aSQian Cai 	delta = sample.jiffies - READ_ONCE(state->last_time);
1082e00d996aSQian Cai 	WRITE_ONCE(state->last_time, sample.jiffies);
10831da177e4SLinus Torvalds 
1084e00d996aSQian Cai 	delta2 = delta - READ_ONCE(state->last_delta);
1085e00d996aSQian Cai 	WRITE_ONCE(state->last_delta, delta);
10861da177e4SLinus Torvalds 
1087e00d996aSQian Cai 	delta3 = delta2 - READ_ONCE(state->last_delta2);
1088e00d996aSQian Cai 	WRITE_ONCE(state->last_delta2, delta2);
10891da177e4SLinus Torvalds 
10901da177e4SLinus Torvalds 	if (delta < 0)
10911da177e4SLinus Torvalds 		delta = -delta;
10921da177e4SLinus Torvalds 	if (delta2 < 0)
10931da177e4SLinus Torvalds 		delta2 = -delta2;
10941da177e4SLinus Torvalds 	if (delta3 < 0)
10951da177e4SLinus Torvalds 		delta3 = -delta3;
10961da177e4SLinus Torvalds 	if (delta > delta2)
10971da177e4SLinus Torvalds 		delta = delta2;
10981da177e4SLinus Torvalds 	if (delta > delta3)
10991da177e4SLinus Torvalds 		delta = delta3;
11001da177e4SLinus Torvalds 
11011da177e4SLinus Torvalds 	/*
11021da177e4SLinus Torvalds 	 * delta is now minimum absolute delta.
11031da177e4SLinus Torvalds 	 * Round down by 1 bit on general principles,
1104727d499aSYangtao Li 	 * and limit entropy estimate to 12 bits.
11051da177e4SLinus Torvalds 	 */
110604ec96b7SJason A. Donenfeld 	credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
11071da177e4SLinus Torvalds }
11081da177e4SLinus Torvalds 
1109d251575aSStephen Hemminger void add_input_randomness(unsigned int type, unsigned int code,
11101da177e4SLinus Torvalds 			  unsigned int value)
11111da177e4SLinus Torvalds {
11121da177e4SLinus Torvalds 	static unsigned char last_value;
111392c653cfSJason A. Donenfeld 	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
11141da177e4SLinus Torvalds 
111592c653cfSJason A. Donenfeld 	/* Ignore autorepeat and the like. */
11161da177e4SLinus Torvalds 	if (value == last_value)
11171da177e4SLinus Torvalds 		return;
11181da177e4SLinus Torvalds 
11191da177e4SLinus Torvalds 	last_value = value;
11201da177e4SLinus Torvalds 	add_timer_randomness(&input_timer_state,
11211da177e4SLinus Torvalds 			     (type << 4) ^ code ^ (code >> 4) ^ value);
11221da177e4SLinus Torvalds }
112380fc9f53SDmitry Torokhov EXPORT_SYMBOL_GPL(add_input_randomness);
11241da177e4SLinus Torvalds 
112592c653cfSJason A. Donenfeld #ifdef CONFIG_BLOCK
112692c653cfSJason A. Donenfeld void add_disk_randomness(struct gendisk *disk)
112792c653cfSJason A. Donenfeld {
112892c653cfSJason A. Donenfeld 	if (!disk || !disk->random)
112992c653cfSJason A. Donenfeld 		return;
113092c653cfSJason A. Donenfeld 	/* First major is 1, so we get >= 0x200 here. */
113192c653cfSJason A. Donenfeld 	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
113292c653cfSJason A. Donenfeld }
113392c653cfSJason A. Donenfeld EXPORT_SYMBOL_GPL(add_disk_randomness);
113492c653cfSJason A. Donenfeld 
113592c653cfSJason A. Donenfeld void rand_initialize_disk(struct gendisk *disk)
113692c653cfSJason A. Donenfeld {
113792c653cfSJason A. Donenfeld 	struct timer_rand_state *state;
113892c653cfSJason A. Donenfeld 
113992c653cfSJason A. Donenfeld 	/*
114092c653cfSJason A. Donenfeld 	 * If kzalloc returns null, we just won't use that entropy
114192c653cfSJason A. Donenfeld 	 * source.
114292c653cfSJason A. Donenfeld 	 */
114392c653cfSJason A. Donenfeld 	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
114492c653cfSJason A. Donenfeld 	if (state) {
114592c653cfSJason A. Donenfeld 		state->last_time = INITIAL_JIFFIES;
114692c653cfSJason A. Donenfeld 		disk->random = state;
114792c653cfSJason A. Donenfeld 	}
114892c653cfSJason A. Donenfeld }
114992c653cfSJason A. Donenfeld #endif
115092c653cfSJason A. Donenfeld 
115192c653cfSJason A. Donenfeld /*
115292c653cfSJason A. Donenfeld  * Interface for in-kernel drivers of true hardware RNGs.
115392c653cfSJason A. Donenfeld  * Those devices may produce endless random bits and will be throttled
115492c653cfSJason A. Donenfeld  * when our pool is full.
115592c653cfSJason A. Donenfeld  */
115692c653cfSJason A. Donenfeld void add_hwgenerator_randomness(const void *buffer, size_t count,
115792c653cfSJason A. Donenfeld 				size_t entropy)
115892c653cfSJason A. Donenfeld {
115992c653cfSJason A. Donenfeld 	if (unlikely(crng_init == 0)) {
1160da792c6dSJason A. Donenfeld 		size_t ret = crng_pre_init_inject(buffer, count, false, true);
116192c653cfSJason A. Donenfeld 		mix_pool_bytes(buffer, ret);
116292c653cfSJason A. Donenfeld 		count -= ret;
116392c653cfSJason A. Donenfeld 		buffer += ret;
116492c653cfSJason A. Donenfeld 		if (!count || crng_init == 0)
116592c653cfSJason A. Donenfeld 			return;
116692c653cfSJason A. Donenfeld 	}
116792c653cfSJason A. Donenfeld 
116892c653cfSJason A. Donenfeld 	/*
116992c653cfSJason A. Donenfeld 	 * Throttle writing if we're above the trickle threshold.
117092c653cfSJason A. Donenfeld 	 * We'll be woken up again once below POOL_MIN_BITS, when
117192c653cfSJason A. Donenfeld 	 * the calling thread is about to terminate, or once
117292c653cfSJason A. Donenfeld 	 * CRNG_RESEED_INTERVAL has elapsed.
117392c653cfSJason A. Donenfeld 	 */
117492c653cfSJason A. Donenfeld 	wait_event_interruptible_timeout(random_write_wait,
117592c653cfSJason A. Donenfeld 			!system_wq || kthread_should_stop() ||
117692c653cfSJason A. Donenfeld 			input_pool.entropy_count < POOL_MIN_BITS,
117792c653cfSJason A. Donenfeld 			CRNG_RESEED_INTERVAL);
117892c653cfSJason A. Donenfeld 	mix_pool_bytes(buffer, count);
117992c653cfSJason A. Donenfeld 	credit_entropy_bits(entropy);
118092c653cfSJason A. Donenfeld }
118192c653cfSJason A. Donenfeld EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
118292c653cfSJason A. Donenfeld 
118392c653cfSJason A. Donenfeld /*
118492c653cfSJason A. Donenfeld  * Handle random seed passed by bootloader.
118592c653cfSJason A. Donenfeld  * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
118692c653cfSJason A. Donenfeld  * it would be regarded as device data.
118792c653cfSJason A. Donenfeld  * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
118892c653cfSJason A. Donenfeld  */
118992c653cfSJason A. Donenfeld void add_bootloader_randomness(const void *buf, size_t size)
119092c653cfSJason A. Donenfeld {
119192c653cfSJason A. Donenfeld 	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
119292c653cfSJason A. Donenfeld 		add_hwgenerator_randomness(buf, size, size * 8);
119392c653cfSJason A. Donenfeld 	else
119492c653cfSJason A. Donenfeld 		add_device_randomness(buf, size);
119592c653cfSJason A. Donenfeld }
119692c653cfSJason A. Donenfeld EXPORT_SYMBOL_GPL(add_bootloader_randomness);
119792c653cfSJason A. Donenfeld 
119892c653cfSJason A. Donenfeld struct fast_pool {
119992c653cfSJason A. Donenfeld 	union {
120092c653cfSJason A. Donenfeld 		u32 pool32[4];
120192c653cfSJason A. Donenfeld 		u64 pool64[2];
120292c653cfSJason A. Donenfeld 	};
120358340f8eSJason A. Donenfeld 	struct work_struct mix;
120492c653cfSJason A. Donenfeld 	unsigned long last;
12053191dd5aSJason A. Donenfeld 	unsigned int count;
120692c653cfSJason A. Donenfeld 	u16 reg_idx;
120792c653cfSJason A. Donenfeld };
120892c653cfSJason A. Donenfeld 
120992c653cfSJason A. Donenfeld /*
121092c653cfSJason A. Donenfeld  * This is a fast mixing routine used by the interrupt randomness
121192c653cfSJason A. Donenfeld  * collector. It's hardcoded for an 128 bit pool and assumes that any
121292c653cfSJason A. Donenfeld  * locks that might be needed are taken by the caller.
121392c653cfSJason A. Donenfeld  */
121492c653cfSJason A. Donenfeld static void fast_mix(u32 pool[4])
121592c653cfSJason A. Donenfeld {
121692c653cfSJason A. Donenfeld 	u32 a = pool[0],	b = pool[1];
121792c653cfSJason A. Donenfeld 	u32 c = pool[2],	d = pool[3];
121892c653cfSJason A. Donenfeld 
121992c653cfSJason A. Donenfeld 	a += b;			c += d;
122092c653cfSJason A. Donenfeld 	b = rol32(b, 6);	d = rol32(d, 27);
122192c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
122292c653cfSJason A. Donenfeld 
122392c653cfSJason A. Donenfeld 	a += b;			c += d;
122492c653cfSJason A. Donenfeld 	b = rol32(b, 16);	d = rol32(d, 14);
122592c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
122692c653cfSJason A. Donenfeld 
122792c653cfSJason A. Donenfeld 	a += b;			c += d;
122892c653cfSJason A. Donenfeld 	b = rol32(b, 6);	d = rol32(d, 27);
122992c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
123092c653cfSJason A. Donenfeld 
123192c653cfSJason A. Donenfeld 	a += b;			c += d;
123292c653cfSJason A. Donenfeld 	b = rol32(b, 16);	d = rol32(d, 14);
123392c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
123492c653cfSJason A. Donenfeld 
123592c653cfSJason A. Donenfeld 	pool[0] = a;  pool[1] = b;
123692c653cfSJason A. Donenfeld 	pool[2] = c;  pool[3] = d;
123792c653cfSJason A. Donenfeld }
123892c653cfSJason A. Donenfeld 
1239775f4b29STheodore Ts'o static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
1240775f4b29STheodore Ts'o 
12413191dd5aSJason A. Donenfeld #ifdef CONFIG_SMP
12423191dd5aSJason A. Donenfeld /*
12433191dd5aSJason A. Donenfeld  * This function is called when the CPU has just come online, with
12443191dd5aSJason A. Donenfeld  * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
12453191dd5aSJason A. Donenfeld  */
12463191dd5aSJason A. Donenfeld int random_online_cpu(unsigned int cpu)
12473191dd5aSJason A. Donenfeld {
12483191dd5aSJason A. Donenfeld 	/*
12493191dd5aSJason A. Donenfeld 	 * During CPU shutdown and before CPU onlining, add_interrupt_
12503191dd5aSJason A. Donenfeld 	 * randomness() may schedule mix_interrupt_randomness(), and
12513191dd5aSJason A. Donenfeld 	 * set the MIX_INFLIGHT flag. However, because the worker can
12523191dd5aSJason A. Donenfeld 	 * be scheduled on a different CPU during this period, that
12533191dd5aSJason A. Donenfeld 	 * flag will never be cleared. For that reason, we zero out
12543191dd5aSJason A. Donenfeld 	 * the flag here, which runs just after workqueues are onlined
12553191dd5aSJason A. Donenfeld 	 * for the CPU again. This also has the effect of setting the
12563191dd5aSJason A. Donenfeld 	 * irq randomness count to zero so that new accumulated irqs
12573191dd5aSJason A. Donenfeld 	 * are fresh.
12583191dd5aSJason A. Donenfeld 	 */
12593191dd5aSJason A. Donenfeld 	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
12603191dd5aSJason A. Donenfeld 	return 0;
12613191dd5aSJason A. Donenfeld }
12623191dd5aSJason A. Donenfeld #endif
12633191dd5aSJason A. Donenfeld 
1264da3951ebSJason A. Donenfeld static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
1265ee3e00e9STheodore Ts'o {
1266da3951ebSJason A. Donenfeld 	unsigned long *ptr = (unsigned long *)regs;
126792e75428STheodore Ts'o 	unsigned int idx;
1268ee3e00e9STheodore Ts'o 
1269ee3e00e9STheodore Ts'o 	if (regs == NULL)
1270ee3e00e9STheodore Ts'o 		return 0;
127192e75428STheodore Ts'o 	idx = READ_ONCE(f->reg_idx);
1272da3951ebSJason A. Donenfeld 	if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
127392e75428STheodore Ts'o 		idx = 0;
127492e75428STheodore Ts'o 	ptr += idx++;
127592e75428STheodore Ts'o 	WRITE_ONCE(f->reg_idx, idx);
12769dfa7bbaSMichael Schmitz 	return *ptr;
1277ee3e00e9STheodore Ts'o }
1278ee3e00e9STheodore Ts'o 
127958340f8eSJason A. Donenfeld static void mix_interrupt_randomness(struct work_struct *work)
128058340f8eSJason A. Donenfeld {
128158340f8eSJason A. Donenfeld 	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
128258340f8eSJason A. Donenfeld 	u32 pool[4];
128358340f8eSJason A. Donenfeld 
128458340f8eSJason A. Donenfeld 	/* Check to see if we're running on the wrong CPU due to hotplug. */
128558340f8eSJason A. Donenfeld 	local_irq_disable();
128658340f8eSJason A. Donenfeld 	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
128758340f8eSJason A. Donenfeld 		local_irq_enable();
128858340f8eSJason A. Donenfeld 		return;
128958340f8eSJason A. Donenfeld 	}
129058340f8eSJason A. Donenfeld 
129158340f8eSJason A. Donenfeld 	/*
129258340f8eSJason A. Donenfeld 	 * Copy the pool to the stack so that the mixer always has a
129358340f8eSJason A. Donenfeld 	 * consistent view, before we reenable irqs again.
129458340f8eSJason A. Donenfeld 	 */
129558340f8eSJason A. Donenfeld 	memcpy(pool, fast_pool->pool32, sizeof(pool));
12963191dd5aSJason A. Donenfeld 	fast_pool->count = 0;
129758340f8eSJason A. Donenfeld 	fast_pool->last = jiffies;
129858340f8eSJason A. Donenfeld 	local_irq_enable();
129958340f8eSJason A. Donenfeld 
130058340f8eSJason A. Donenfeld 	mix_pool_bytes(pool, sizeof(pool));
130158340f8eSJason A. Donenfeld 	credit_entropy_bits(1);
130258340f8eSJason A. Donenfeld 	memzero_explicit(pool, sizeof(pool));
130358340f8eSJason A. Donenfeld }
130458340f8eSJason A. Donenfeld 
1305703f7066SSebastian Andrzej Siewior void add_interrupt_randomness(int irq)
13061da177e4SLinus Torvalds {
130758340f8eSJason A. Donenfeld 	enum { MIX_INFLIGHT = 1U << 31 };
13081b2a1a7eSChristoph Lameter 	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
1309775f4b29STheodore Ts'o 	struct pt_regs *regs = get_irq_regs();
1310775f4b29STheodore Ts'o 	unsigned long now = jiffies;
1311655b2264STheodore Ts'o 	cycles_t cycles = random_get_entropy();
131258340f8eSJason A. Donenfeld 	unsigned int new_count;
13133060d6feSYinghai Lu 
1314ee3e00e9STheodore Ts'o 	if (cycles == 0)
1315ee3e00e9STheodore Ts'o 		cycles = get_reg(fast_pool, regs);
13163060d6feSYinghai Lu 
1317b2f408feSJason A. Donenfeld 	if (sizeof(cycles) == 8)
1318b2f408feSJason A. Donenfeld 		fast_pool->pool64[0] ^= cycles ^ rol64(now, 32) ^ irq;
1319b2f408feSJason A. Donenfeld 	else {
1320b2f408feSJason A. Donenfeld 		fast_pool->pool32[0] ^= cycles ^ irq;
1321b2f408feSJason A. Donenfeld 		fast_pool->pool32[1] ^= now;
1322b2f408feSJason A. Donenfeld 	}
1323b2f408feSJason A. Donenfeld 
1324b2f408feSJason A. Donenfeld 	if (sizeof(unsigned long) == 8)
1325b2f408feSJason A. Donenfeld 		fast_pool->pool64[1] ^= regs ? instruction_pointer(regs) : _RET_IP_;
1326b2f408feSJason A. Donenfeld 	else {
1327b2f408feSJason A. Donenfeld 		fast_pool->pool32[2] ^= regs ? instruction_pointer(regs) : _RET_IP_;
1328b2f408feSJason A. Donenfeld 		fast_pool->pool32[3] ^= get_reg(fast_pool, regs);
1329b2f408feSJason A. Donenfeld 	}
1330b2f408feSJason A. Donenfeld 
1331b2f408feSJason A. Donenfeld 	fast_mix(fast_pool->pool32);
13323191dd5aSJason A. Donenfeld 	new_count = ++fast_pool->count;
1333775f4b29STheodore Ts'o 
133443838a23STheodore Ts'o 	if (unlikely(crng_init == 0)) {
133558340f8eSJason A. Donenfeld 		if (new_count >= 64 &&
1336da792c6dSJason A. Donenfeld 		    crng_pre_init_inject(fast_pool->pool32, sizeof(fast_pool->pool32),
1337da792c6dSJason A. Donenfeld 					 true, true) > 0) {
13383191dd5aSJason A. Donenfeld 			fast_pool->count = 0;
1339e192be9dSTheodore Ts'o 			fast_pool->last = now;
1340c30c575dSJason A. Donenfeld 			if (spin_trylock(&input_pool.lock)) {
1341b2f408feSJason A. Donenfeld 				_mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32));
1342c30c575dSJason A. Donenfeld 				spin_unlock(&input_pool.lock);
1343c30c575dSJason A. Donenfeld 			}
1344e192be9dSTheodore Ts'o 		}
1345e192be9dSTheodore Ts'o 		return;
1346e192be9dSTheodore Ts'o 	}
1347e192be9dSTheodore Ts'o 
134858340f8eSJason A. Donenfeld 	if (new_count & MIX_INFLIGHT)
13491da177e4SLinus Torvalds 		return;
1350840f9507STheodore Ts'o 
135158340f8eSJason A. Donenfeld 	if (new_count < 64 && !time_after(now, fast_pool->last + HZ))
13521da177e4SLinus Torvalds 		return;
13531da177e4SLinus Torvalds 
135458340f8eSJason A. Donenfeld 	if (unlikely(!fast_pool->mix.func))
135558340f8eSJason A. Donenfeld 		INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
13563191dd5aSJason A. Donenfeld 	fast_pool->count |= MIX_INFLIGHT;
135758340f8eSJason A. Donenfeld 	queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
13581da177e4SLinus Torvalds }
13594b44f2d1SStephan Mueller EXPORT_SYMBOL_GPL(add_interrupt_randomness);
13601da177e4SLinus Torvalds 
13611da177e4SLinus Torvalds /*
136250ee7529SLinus Torvalds  * Each time the timer fires, we expect that we got an unpredictable
136350ee7529SLinus Torvalds  * jump in the cycle counter. Even if the timer is running on another
136450ee7529SLinus Torvalds  * CPU, the timer activity will be touching the stack of the CPU that is
136550ee7529SLinus Torvalds  * generating entropy..
136650ee7529SLinus Torvalds  *
136750ee7529SLinus Torvalds  * Note that we don't re-arm the timer in the timer itself - we are
136850ee7529SLinus Torvalds  * happy to be scheduled away, since that just makes the load more
136950ee7529SLinus Torvalds  * complex, but we do not want the timer to keep ticking unless the
137050ee7529SLinus Torvalds  * entropy loop is running.
137150ee7529SLinus Torvalds  *
137250ee7529SLinus Torvalds  * So the re-arming always happens in the entropy loop itself.
137350ee7529SLinus Torvalds  */
137450ee7529SLinus Torvalds static void entropy_timer(struct timer_list *t)
137550ee7529SLinus Torvalds {
137690ed1e67SJason A. Donenfeld 	credit_entropy_bits(1);
137750ee7529SLinus Torvalds }
137850ee7529SLinus Torvalds 
137950ee7529SLinus Torvalds /*
138050ee7529SLinus Torvalds  * If we have an actual cycle counter, see if we can
138150ee7529SLinus Torvalds  * generate enough entropy with timing noise
138250ee7529SLinus Torvalds  */
138350ee7529SLinus Torvalds static void try_to_generate_entropy(void)
138450ee7529SLinus Torvalds {
138550ee7529SLinus Torvalds 	struct {
138650ee7529SLinus Torvalds 		unsigned long now;
138750ee7529SLinus Torvalds 		struct timer_list timer;
138850ee7529SLinus Torvalds 	} stack;
138950ee7529SLinus Torvalds 
139050ee7529SLinus Torvalds 	stack.now = random_get_entropy();
139150ee7529SLinus Torvalds 
139250ee7529SLinus Torvalds 	/* Slow counter - or none. Don't even bother */
139350ee7529SLinus Torvalds 	if (stack.now == random_get_entropy())
139450ee7529SLinus Torvalds 		return;
139550ee7529SLinus Torvalds 
139650ee7529SLinus Torvalds 	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
139750ee7529SLinus Torvalds 	while (!crng_ready()) {
139850ee7529SLinus Torvalds 		if (!timer_pending(&stack.timer))
139950ee7529SLinus Torvalds 			mod_timer(&stack.timer, jiffies + 1);
140090ed1e67SJason A. Donenfeld 		mix_pool_bytes(&stack.now, sizeof(stack.now));
140150ee7529SLinus Torvalds 		schedule();
140250ee7529SLinus Torvalds 		stack.now = random_get_entropy();
140350ee7529SLinus Torvalds 	}
140450ee7529SLinus Torvalds 
140550ee7529SLinus Torvalds 	del_timer_sync(&stack.timer);
140650ee7529SLinus Torvalds 	destroy_timer_on_stack(&stack.timer);
140790ed1e67SJason A. Donenfeld 	mix_pool_bytes(&stack.now, sizeof(stack.now));
140850ee7529SLinus Torvalds }
140950ee7529SLinus Torvalds 
1410a6adf8e7SJason A. Donenfeld 
1411a6adf8e7SJason A. Donenfeld /**********************************************************************
1412a6adf8e7SJason A. Donenfeld  *
1413a6adf8e7SJason A. Donenfeld  * Userspace reader/writer interfaces.
1414a6adf8e7SJason A. Donenfeld  *
1415a6adf8e7SJason A. Donenfeld  * getrandom(2) is the primary modern interface into the RNG and should
1416a6adf8e7SJason A. Donenfeld  * be used in preference to anything else.
1417a6adf8e7SJason A. Donenfeld  *
1418a6adf8e7SJason A. Donenfeld  * Reading from /dev/random has the same functionality as calling
1419a6adf8e7SJason A. Donenfeld  * getrandom(2) with flags=0. In earlier versions, however, it had
1420a6adf8e7SJason A. Donenfeld  * vastly different semantics and should therefore be avoided, to
1421a6adf8e7SJason A. Donenfeld  * prevent backwards compatibility issues.
1422a6adf8e7SJason A. Donenfeld  *
1423a6adf8e7SJason A. Donenfeld  * Reading from /dev/urandom has the same functionality as calling
1424a6adf8e7SJason A. Donenfeld  * getrandom(2) with flags=GRND_INSECURE. Because it does not block
1425a6adf8e7SJason A. Donenfeld  * waiting for the RNG to be ready, it should not be used.
1426a6adf8e7SJason A. Donenfeld  *
1427a6adf8e7SJason A. Donenfeld  * Writing to either /dev/random or /dev/urandom adds entropy to
1428a6adf8e7SJason A. Donenfeld  * the input pool but does not credit it.
1429a6adf8e7SJason A. Donenfeld  *
1430a6adf8e7SJason A. Donenfeld  * Polling on /dev/random indicates when the RNG is initialized, on
1431a6adf8e7SJason A. Donenfeld  * the read side, and when it wants new entropy, on the write side.
1432a6adf8e7SJason A. Donenfeld  *
1433a6adf8e7SJason A. Donenfeld  * Both /dev/random and /dev/urandom have the same set of ioctls for
1434a6adf8e7SJason A. Donenfeld  * adding entropy, getting the entropy count, zeroing the count, and
1435a6adf8e7SJason A. Donenfeld  * reseeding the crng.
1436a6adf8e7SJason A. Donenfeld  *
1437a6adf8e7SJason A. Donenfeld  **********************************************************************/
1438a6adf8e7SJason A. Donenfeld 
1439a6adf8e7SJason A. Donenfeld SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
1440a6adf8e7SJason A. Donenfeld 		flags)
14411da177e4SLinus Torvalds {
1442a6adf8e7SJason A. Donenfeld 	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
1443a6adf8e7SJason A. Donenfeld 		return -EINVAL;
1444301f0595STheodore Ts'o 
1445a6adf8e7SJason A. Donenfeld 	/*
1446a6adf8e7SJason A. Donenfeld 	 * Requesting insecure and blocking randomness at the same time makes
1447a6adf8e7SJason A. Donenfeld 	 * no sense.
1448a6adf8e7SJason A. Donenfeld 	 */
1449a6adf8e7SJason A. Donenfeld 	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
1450a6adf8e7SJason A. Donenfeld 		return -EINVAL;
1451c6f1deb1SAndy Lutomirski 
1452a6adf8e7SJason A. Donenfeld 	if (count > INT_MAX)
1453a6adf8e7SJason A. Donenfeld 		count = INT_MAX;
14541da177e4SLinus Torvalds 
1455a6adf8e7SJason A. Donenfeld 	if (!(flags & GRND_INSECURE) && !crng_ready()) {
145630c08efeSAndy Lutomirski 		int ret;
145730c08efeSAndy Lutomirski 
1458a6adf8e7SJason A. Donenfeld 		if (flags & GRND_NONBLOCK)
1459a6adf8e7SJason A. Donenfeld 			return -EAGAIN;
146030c08efeSAndy Lutomirski 		ret = wait_for_random_bytes();
1461a6adf8e7SJason A. Donenfeld 		if (unlikely(ret))
146230c08efeSAndy Lutomirski 			return ret;
1463a6adf8e7SJason A. Donenfeld 	}
1464a6adf8e7SJason A. Donenfeld 	return get_random_bytes_user(buf, count);
146530c08efeSAndy Lutomirski }
146630c08efeSAndy Lutomirski 
1467248045b8SJason A. Donenfeld static __poll_t random_poll(struct file *file, poll_table *wait)
146889b310a2SChristoph Hellwig {
1469a11e1d43SLinus Torvalds 	__poll_t mask;
147089b310a2SChristoph Hellwig 
147130c08efeSAndy Lutomirski 	poll_wait(file, &crng_init_wait, wait);
1472a11e1d43SLinus Torvalds 	poll_wait(file, &random_write_wait, wait);
1473a11e1d43SLinus Torvalds 	mask = 0;
147430c08efeSAndy Lutomirski 	if (crng_ready())
1475a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
1476489c7fc4SJason A. Donenfeld 	if (input_pool.entropy_count < POOL_MIN_BITS)
1477a9a08845SLinus Torvalds 		mask |= EPOLLOUT | EPOLLWRNORM;
14781da177e4SLinus Torvalds 	return mask;
14791da177e4SLinus Torvalds }
14801da177e4SLinus Torvalds 
148104ec96b7SJason A. Donenfeld static int write_pool(const char __user *ubuf, size_t count)
14827f397dcdSMatt Mackall {
148304ec96b7SJason A. Donenfeld 	size_t len;
14847b5164fbSJason A. Donenfeld 	int ret = 0;
148504ec96b7SJason A. Donenfeld 	u8 block[BLAKE2S_BLOCK_SIZE];
14867f397dcdSMatt Mackall 
148704ec96b7SJason A. Donenfeld 	while (count) {
148804ec96b7SJason A. Donenfeld 		len = min(count, sizeof(block));
14897b5164fbSJason A. Donenfeld 		if (copy_from_user(block, ubuf, len)) {
14907b5164fbSJason A. Donenfeld 			ret = -EFAULT;
14917b5164fbSJason A. Donenfeld 			goto out;
14927b5164fbSJason A. Donenfeld 		}
149304ec96b7SJason A. Donenfeld 		count -= len;
149404ec96b7SJason A. Donenfeld 		ubuf += len;
149504ec96b7SJason A. Donenfeld 		mix_pool_bytes(block, len);
149691f3f1e3SMatt Mackall 		cond_resched();
14977f397dcdSMatt Mackall 	}
14987f397dcdSMatt Mackall 
14997b5164fbSJason A. Donenfeld out:
15007b5164fbSJason A. Donenfeld 	memzero_explicit(block, sizeof(block));
15017b5164fbSJason A. Donenfeld 	return ret;
15027f397dcdSMatt Mackall }
15037f397dcdSMatt Mackall 
150490b75ee5SMatt Mackall static ssize_t random_write(struct file *file, const char __user *buffer,
15051da177e4SLinus Torvalds 			    size_t count, loff_t *ppos)
15061da177e4SLinus Torvalds {
150704ec96b7SJason A. Donenfeld 	int ret;
15087f397dcdSMatt Mackall 
150990ed1e67SJason A. Donenfeld 	ret = write_pool(buffer, count);
15107f397dcdSMatt Mackall 	if (ret)
15117f397dcdSMatt Mackall 		return ret;
15127f397dcdSMatt Mackall 
15137f397dcdSMatt Mackall 	return (ssize_t)count;
15141da177e4SLinus Torvalds }
15151da177e4SLinus Torvalds 
1516a6adf8e7SJason A. Donenfeld static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
1517a6adf8e7SJason A. Donenfeld 			    loff_t *ppos)
1518a6adf8e7SJason A. Donenfeld {
1519a6adf8e7SJason A. Donenfeld 	static int maxwarn = 10;
1520a6adf8e7SJason A. Donenfeld 
1521a6adf8e7SJason A. Donenfeld 	if (!crng_ready() && maxwarn > 0) {
1522a6adf8e7SJason A. Donenfeld 		maxwarn--;
1523a6adf8e7SJason A. Donenfeld 		if (__ratelimit(&urandom_warning))
1524a6adf8e7SJason A. Donenfeld 			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
1525a6adf8e7SJason A. Donenfeld 				  current->comm, nbytes);
1526a6adf8e7SJason A. Donenfeld 	}
1527a6adf8e7SJason A. Donenfeld 
1528a6adf8e7SJason A. Donenfeld 	return get_random_bytes_user(buf, nbytes);
1529a6adf8e7SJason A. Donenfeld }
1530a6adf8e7SJason A. Donenfeld 
1531a6adf8e7SJason A. Donenfeld static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
1532a6adf8e7SJason A. Donenfeld 			   loff_t *ppos)
1533a6adf8e7SJason A. Donenfeld {
1534a6adf8e7SJason A. Donenfeld 	int ret;
1535a6adf8e7SJason A. Donenfeld 
1536a6adf8e7SJason A. Donenfeld 	ret = wait_for_random_bytes();
1537a6adf8e7SJason A. Donenfeld 	if (ret != 0)
1538a6adf8e7SJason A. Donenfeld 		return ret;
1539a6adf8e7SJason A. Donenfeld 	return get_random_bytes_user(buf, nbytes);
1540a6adf8e7SJason A. Donenfeld }
1541a6adf8e7SJason A. Donenfeld 
154243ae4860SMatt Mackall static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
15431da177e4SLinus Torvalds {
15441da177e4SLinus Torvalds 	int size, ent_count;
15451da177e4SLinus Torvalds 	int __user *p = (int __user *)arg;
15461da177e4SLinus Torvalds 	int retval;
15471da177e4SLinus Torvalds 
15481da177e4SLinus Torvalds 	switch (cmd) {
15491da177e4SLinus Torvalds 	case RNDGETENTCNT:
1550a6adf8e7SJason A. Donenfeld 		/* Inherently racy, no point locking. */
1551c5704490SJason A. Donenfeld 		if (put_user(input_pool.entropy_count, p))
15521da177e4SLinus Torvalds 			return -EFAULT;
15531da177e4SLinus Torvalds 		return 0;
15541da177e4SLinus Torvalds 	case RNDADDTOENTCNT:
15551da177e4SLinus Torvalds 		if (!capable(CAP_SYS_ADMIN))
15561da177e4SLinus Torvalds 			return -EPERM;
15571da177e4SLinus Torvalds 		if (get_user(ent_count, p))
15581da177e4SLinus Torvalds 			return -EFAULT;
1559a49c010eSJason A. Donenfeld 		if (ent_count < 0)
1560a49c010eSJason A. Donenfeld 			return -EINVAL;
1561a49c010eSJason A. Donenfeld 		credit_entropy_bits(ent_count);
1562a49c010eSJason A. Donenfeld 		return 0;
15631da177e4SLinus Torvalds 	case RNDADDENTROPY:
15641da177e4SLinus Torvalds 		if (!capable(CAP_SYS_ADMIN))
15651da177e4SLinus Torvalds 			return -EPERM;
15661da177e4SLinus Torvalds 		if (get_user(ent_count, p++))
15671da177e4SLinus Torvalds 			return -EFAULT;
15681da177e4SLinus Torvalds 		if (ent_count < 0)
15691da177e4SLinus Torvalds 			return -EINVAL;
15701da177e4SLinus Torvalds 		if (get_user(size, p++))
15711da177e4SLinus Torvalds 			return -EFAULT;
157290ed1e67SJason A. Donenfeld 		retval = write_pool((const char __user *)p, size);
15731da177e4SLinus Torvalds 		if (retval < 0)
15741da177e4SLinus Torvalds 			return retval;
1575a49c010eSJason A. Donenfeld 		credit_entropy_bits(ent_count);
1576a49c010eSJason A. Donenfeld 		return 0;
15771da177e4SLinus Torvalds 	case RNDZAPENTCNT:
15781da177e4SLinus Torvalds 	case RNDCLEARPOOL:
1579ae9ecd92STheodore Ts'o 		/*
1580ae9ecd92STheodore Ts'o 		 * Clear the entropy pool counters. We no longer clear
1581ae9ecd92STheodore Ts'o 		 * the entropy pool, as that's silly.
1582ae9ecd92STheodore Ts'o 		 */
15831da177e4SLinus Torvalds 		if (!capable(CAP_SYS_ADMIN))
15841da177e4SLinus Torvalds 			return -EPERM;
1585a3f9e891SJason A. Donenfeld 		if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) {
1586042e293eSJason A. Donenfeld 			wake_up_interruptible(&random_write_wait);
1587042e293eSJason A. Donenfeld 			kill_fasync(&fasync, SIGIO, POLL_OUT);
1588042e293eSJason A. Donenfeld 		}
15891da177e4SLinus Torvalds 		return 0;
1590d848e5f8STheodore Ts'o 	case RNDRESEEDCRNG:
1591d848e5f8STheodore Ts'o 		if (!capable(CAP_SYS_ADMIN))
1592d848e5f8STheodore Ts'o 			return -EPERM;
1593d848e5f8STheodore Ts'o 		if (crng_init < 2)
1594d848e5f8STheodore Ts'o 			return -ENODATA;
1595a9412d51SJason A. Donenfeld 		crng_reseed();
1596d848e5f8STheodore Ts'o 		return 0;
15971da177e4SLinus Torvalds 	default:
15981da177e4SLinus Torvalds 		return -EINVAL;
15991da177e4SLinus Torvalds 	}
16001da177e4SLinus Torvalds }
16011da177e4SLinus Torvalds 
16029a6f70bbSJeff Dike static int random_fasync(int fd, struct file *filp, int on)
16039a6f70bbSJeff Dike {
16049a6f70bbSJeff Dike 	return fasync_helper(fd, filp, on, &fasync);
16059a6f70bbSJeff Dike }
16069a6f70bbSJeff Dike 
16072b8693c0SArjan van de Ven const struct file_operations random_fops = {
16081da177e4SLinus Torvalds 	.read = random_read,
16091da177e4SLinus Torvalds 	.write = random_write,
1610a11e1d43SLinus Torvalds 	.poll = random_poll,
161143ae4860SMatt Mackall 	.unlocked_ioctl = random_ioctl,
1612507e4e2bSArnd Bergmann 	.compat_ioctl = compat_ptr_ioctl,
16139a6f70bbSJeff Dike 	.fasync = random_fasync,
16146038f373SArnd Bergmann 	.llseek = noop_llseek,
16151da177e4SLinus Torvalds };
16161da177e4SLinus Torvalds 
16172b8693c0SArjan van de Ven const struct file_operations urandom_fops = {
16181da177e4SLinus Torvalds 	.read = urandom_read,
16191da177e4SLinus Torvalds 	.write = random_write,
162043ae4860SMatt Mackall 	.unlocked_ioctl = random_ioctl,
16214aa37c46SJason A. Donenfeld 	.compat_ioctl = compat_ptr_ioctl,
16229a6f70bbSJeff Dike 	.fasync = random_fasync,
16236038f373SArnd Bergmann 	.llseek = noop_llseek,
16241da177e4SLinus Torvalds };
16251da177e4SLinus Torvalds 
16260deff3c4SJason A. Donenfeld 
16271da177e4SLinus Torvalds /********************************************************************
16281da177e4SLinus Torvalds  *
16290deff3c4SJason A. Donenfeld  * Sysctl interface.
16300deff3c4SJason A. Donenfeld  *
16310deff3c4SJason A. Donenfeld  * These are partly unused legacy knobs with dummy values to not break
16320deff3c4SJason A. Donenfeld  * userspace and partly still useful things. They are usually accessible
16330deff3c4SJason A. Donenfeld  * in /proc/sys/kernel/random/ and are as follows:
16340deff3c4SJason A. Donenfeld  *
16350deff3c4SJason A. Donenfeld  * - boot_id - a UUID representing the current boot.
16360deff3c4SJason A. Donenfeld  *
16370deff3c4SJason A. Donenfeld  * - uuid - a random UUID, different each time the file is read.
16380deff3c4SJason A. Donenfeld  *
16390deff3c4SJason A. Donenfeld  * - poolsize - the number of bits of entropy that the input pool can
16400deff3c4SJason A. Donenfeld  *   hold, tied to the POOL_BITS constant.
16410deff3c4SJason A. Donenfeld  *
16420deff3c4SJason A. Donenfeld  * - entropy_avail - the number of bits of entropy currently in the
16430deff3c4SJason A. Donenfeld  *   input pool. Always <= poolsize.
16440deff3c4SJason A. Donenfeld  *
16450deff3c4SJason A. Donenfeld  * - write_wakeup_threshold - the amount of entropy in the input pool
16460deff3c4SJason A. Donenfeld  *   below which write polls to /dev/random will unblock, requesting
16470deff3c4SJason A. Donenfeld  *   more entropy, tied to the POOL_MIN_BITS constant. It is writable
16480deff3c4SJason A. Donenfeld  *   to avoid breaking old userspaces, but writing to it does not
16490deff3c4SJason A. Donenfeld  *   change any behavior of the RNG.
16500deff3c4SJason A. Donenfeld  *
16510deff3c4SJason A. Donenfeld  * - urandom_min_reseed_secs - fixed to the meaningless value "60".
16520deff3c4SJason A. Donenfeld  *   It is writable to avoid breaking old userspaces, but writing
16530deff3c4SJason A. Donenfeld  *   to it does not change any behavior of the RNG.
16541da177e4SLinus Torvalds  *
16551da177e4SLinus Torvalds  ********************************************************************/
16561da177e4SLinus Torvalds 
16571da177e4SLinus Torvalds #ifdef CONFIG_SYSCTL
16581da177e4SLinus Torvalds 
16591da177e4SLinus Torvalds #include <linux/sysctl.h>
16601da177e4SLinus Torvalds 
16610deff3c4SJason A. Donenfeld static int sysctl_random_min_urandom_seed = 60;
16620deff3c4SJason A. Donenfeld static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
1663489c7fc4SJason A. Donenfeld static int sysctl_poolsize = POOL_BITS;
1664*64276a99SJason A. Donenfeld static u8 sysctl_bootid[UUID_SIZE];
16651da177e4SLinus Torvalds 
16661da177e4SLinus Torvalds /*
1667f22052b2SGreg Price  * This function is used to return both the bootid UUID, and random
16681da177e4SLinus Torvalds  * UUID. The difference is in whether table->data is NULL; if it is,
16691da177e4SLinus Torvalds  * then a new UUID is generated and returned to the user.
16701da177e4SLinus Torvalds  */
1671248045b8SJason A. Donenfeld static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
1672248045b8SJason A. Donenfeld 			size_t *lenp, loff_t *ppos)
16731da177e4SLinus Torvalds {
1674*64276a99SJason A. Donenfeld 	u8 tmp_uuid[UUID_SIZE], *uuid;
1675*64276a99SJason A. Donenfeld 	char uuid_string[UUID_STRING_LEN + 1];
1676*64276a99SJason A. Donenfeld 	struct ctl_table fake_table = {
1677*64276a99SJason A. Donenfeld 		.data = uuid_string,
1678*64276a99SJason A. Donenfeld 		.maxlen = UUID_STRING_LEN
1679*64276a99SJason A. Donenfeld 	};
1680*64276a99SJason A. Donenfeld 
1681*64276a99SJason A. Donenfeld 	if (write)
1682*64276a99SJason A. Donenfeld 		return -EPERM;
16831da177e4SLinus Torvalds 
16841da177e4SLinus Torvalds 	uuid = table->data;
16851da177e4SLinus Torvalds 	if (!uuid) {
16861da177e4SLinus Torvalds 		uuid = tmp_uuid;
16871da177e4SLinus Torvalds 		generate_random_uuid(uuid);
168844e4360fSMathieu Desnoyers 	} else {
168944e4360fSMathieu Desnoyers 		static DEFINE_SPINLOCK(bootid_spinlock);
169044e4360fSMathieu Desnoyers 
169144e4360fSMathieu Desnoyers 		spin_lock(&bootid_spinlock);
169244e4360fSMathieu Desnoyers 		if (!uuid[8])
169344e4360fSMathieu Desnoyers 			generate_random_uuid(uuid);
169444e4360fSMathieu Desnoyers 		spin_unlock(&bootid_spinlock);
169544e4360fSMathieu Desnoyers 	}
16961da177e4SLinus Torvalds 
1697*64276a99SJason A. Donenfeld 	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
1698*64276a99SJason A. Donenfeld 	return proc_dostring(&fake_table, 0, buffer, lenp, ppos);
16991da177e4SLinus Torvalds }
17001da177e4SLinus Torvalds 
17015475e8f0SXiaoming Ni static struct ctl_table random_table[] = {
17021da177e4SLinus Torvalds 	{
17031da177e4SLinus Torvalds 		.procname	= "poolsize",
17041da177e4SLinus Torvalds 		.data		= &sysctl_poolsize,
17051da177e4SLinus Torvalds 		.maxlen		= sizeof(int),
17061da177e4SLinus Torvalds 		.mode		= 0444,
17076d456111SEric W. Biederman 		.proc_handler	= proc_dointvec,
17081da177e4SLinus Torvalds 	},
17091da177e4SLinus Torvalds 	{
17101da177e4SLinus Torvalds 		.procname	= "entropy_avail",
1711c5704490SJason A. Donenfeld 		.data		= &input_pool.entropy_count,
17121da177e4SLinus Torvalds 		.maxlen		= sizeof(int),
17131da177e4SLinus Torvalds 		.mode		= 0444,
1714c5704490SJason A. Donenfeld 		.proc_handler	= proc_dointvec,
17151da177e4SLinus Torvalds 	},
17161da177e4SLinus Torvalds 	{
17171da177e4SLinus Torvalds 		.procname	= "write_wakeup_threshold",
17180deff3c4SJason A. Donenfeld 		.data		= &sysctl_random_write_wakeup_bits,
17191da177e4SLinus Torvalds 		.maxlen		= sizeof(int),
17201da177e4SLinus Torvalds 		.mode		= 0644,
1721489c7fc4SJason A. Donenfeld 		.proc_handler	= proc_dointvec,
17221da177e4SLinus Torvalds 	},
17231da177e4SLinus Torvalds 	{
1724f5c2742cSTheodore Ts'o 		.procname	= "urandom_min_reseed_secs",
17250deff3c4SJason A. Donenfeld 		.data		= &sysctl_random_min_urandom_seed,
1726f5c2742cSTheodore Ts'o 		.maxlen		= sizeof(int),
1727f5c2742cSTheodore Ts'o 		.mode		= 0644,
1728f5c2742cSTheodore Ts'o 		.proc_handler	= proc_dointvec,
1729f5c2742cSTheodore Ts'o 	},
1730f5c2742cSTheodore Ts'o 	{
17311da177e4SLinus Torvalds 		.procname	= "boot_id",
17321da177e4SLinus Torvalds 		.data		= &sysctl_bootid,
17331da177e4SLinus Torvalds 		.mode		= 0444,
17346d456111SEric W. Biederman 		.proc_handler	= proc_do_uuid,
17351da177e4SLinus Torvalds 	},
17361da177e4SLinus Torvalds 	{
17371da177e4SLinus Torvalds 		.procname	= "uuid",
17381da177e4SLinus Torvalds 		.mode		= 0444,
17396d456111SEric W. Biederman 		.proc_handler	= proc_do_uuid,
17401da177e4SLinus Torvalds 	},
1741894d2491SEric W. Biederman 	{ }
17421da177e4SLinus Torvalds };

/*
 * Register the table above under kernel/random. rand_initialize() runs
 * before sysctl_init(), so registration cannot happen there; defer it
 * to a device_initcall instead.
 */
static int __init random_sysctls_init(void)
{
	register_sysctl_init("kernel/random", random_table);
	return 0;
}
device_initcall(random_sysctls_init);
17540deff3c4SJason A. Donenfeld #endif
1755