xref: /linux/drivers/char/random.c (revision da792c6d5f59a76c10a310c5d4c93428fd18f996)
1a07fdae3SJason A. Donenfeld // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
21da177e4SLinus Torvalds /*
39f9eff85SJason A. Donenfeld  * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
49e95ce27SMatt Mackall  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
55f75d9f3SJason A. Donenfeld  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
61da177e4SLinus Torvalds  *
75f75d9f3SJason A. Donenfeld  * This driver produces cryptographically secure pseudorandom data. It is divided
85f75d9f3SJason A. Donenfeld  * into roughly six sections, each with a section header:
91da177e4SLinus Torvalds  *
105f75d9f3SJason A. Donenfeld  *   - Initialization and readiness waiting.
115f75d9f3SJason A. Donenfeld  *   - Fast key erasure RNG, the "crng".
125f75d9f3SJason A. Donenfeld  *   - Entropy accumulation and extraction routines.
135f75d9f3SJason A. Donenfeld  *   - Entropy collection routines.
145f75d9f3SJason A. Donenfeld  *   - Userspace reader/writer interfaces.
155f75d9f3SJason A. Donenfeld  *   - Sysctl interface.
161da177e4SLinus Torvalds  *
175f75d9f3SJason A. Donenfeld  * The high level overview is that there is one input pool, into which
185f75d9f3SJason A. Donenfeld  * various pieces of data are hashed. Some of that data is then "credited" as
195f75d9f3SJason A. Donenfeld  * having a certain number of bits of entropy. When enough bits of entropy are
205f75d9f3SJason A. Donenfeld  * available, the hash is finalized and handed as a key to a stream cipher that
215f75d9f3SJason A. Donenfeld  * expands it indefinitely for various consumers. This key is periodically
225f75d9f3SJason A. Donenfeld  * refreshed as the various entropy collectors, described below, add data to the
235f75d9f3SJason A. Donenfeld  * input pool and credit it. There is currently no Fortuna-like scheduler
245f75d9f3SJason A. Donenfeld  * involved, which can lead to malicious entropy sources causing a premature
255f75d9f3SJason A. Donenfeld  * reseed, and the entropy estimates are, at best, conservative guesses.
261da177e4SLinus Torvalds  */
271da177e4SLinus Torvalds 
2812cd53afSYangtao Li #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2912cd53afSYangtao Li 
301da177e4SLinus Torvalds #include <linux/utsname.h>
311da177e4SLinus Torvalds #include <linux/module.h>
321da177e4SLinus Torvalds #include <linux/kernel.h>
331da177e4SLinus Torvalds #include <linux/major.h>
341da177e4SLinus Torvalds #include <linux/string.h>
351da177e4SLinus Torvalds #include <linux/fcntl.h>
361da177e4SLinus Torvalds #include <linux/slab.h>
371da177e4SLinus Torvalds #include <linux/random.h>
381da177e4SLinus Torvalds #include <linux/poll.h>
391da177e4SLinus Torvalds #include <linux/init.h>
401da177e4SLinus Torvalds #include <linux/fs.h>
411da177e4SLinus Torvalds #include <linux/genhd.h>
421da177e4SLinus Torvalds #include <linux/interrupt.h>
4327ac792cSAndrea Righi #include <linux/mm.h>
44dd0f0cf5SMichael Ellerman #include <linux/nodemask.h>
451da177e4SLinus Torvalds #include <linux/spinlock.h>
46c84dbf61STorsten Duwe #include <linux/kthread.h>
471da177e4SLinus Torvalds #include <linux/percpu.h>
48775f4b29STheodore Ts'o #include <linux/ptrace.h>
496265e169STheodore Ts'o #include <linux/workqueue.h>
50d178a1ebSYinghai Lu #include <linux/irq.h>
514e00b339STheodore Ts'o #include <linux/ratelimit.h>
52c6e9d6f3STheodore Ts'o #include <linux/syscalls.h>
53c6e9d6f3STheodore Ts'o #include <linux/completion.h>
548da4b8c4SAndy Shevchenko #include <linux/uuid.h>
5587e7d5abSJason A. Donenfeld #include <linux/uaccess.h>
561ca1b917SEric Biggers #include <crypto/chacha.h>
579f9eff85SJason A. Donenfeld #include <crypto/blake2s.h>
581da177e4SLinus Torvalds #include <asm/processor.h>
591da177e4SLinus Torvalds #include <asm/irq.h>
60775f4b29STheodore Ts'o #include <asm/irq_regs.h>
611da177e4SLinus Torvalds #include <asm/io.h>
621da177e4SLinus Torvalds 
635f1bb112SJason A. Donenfeld /*********************************************************************
645f1bb112SJason A. Donenfeld  *
655f1bb112SJason A. Donenfeld  * Initialization and readiness waiting.
665f1bb112SJason A. Donenfeld  *
675f1bb112SJason A. Donenfeld  * Much of the RNG infrastructure is devoted to various dependencies
685f1bb112SJason A. Donenfeld  * being able to wait until the RNG has collected enough entropy and
695f1bb112SJason A. Donenfeld  * is ready for safe consumption.
705f1bb112SJason A. Donenfeld  *
715f1bb112SJason A. Donenfeld  *********************************************************************/
725f1bb112SJason A. Donenfeld 
/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
/* "Ready" means fully seeded from the input pool (state 2), not just state 1. */
#define crng_ready() (likely(crng_init > 1))
/* Various types of waiters for crng_init->2 transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
/* Protects random_ready_list below. */
static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

/* Control how we warn userspace. */
static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
/* Module parameter; presumably consulted at the warning call sites to bypass
 * the two ratelimits above -- confirm at the (not visible here) use sites. */
static int ratelimit_disable __read_mostly;
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
975f1bb112SJason A. Donenfeld 
/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
 * ,u64,int,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	/* Thin exported wrapper around the file-local crng_ready() macro. */
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);
1125f1bb112SJason A. Donenfeld 
1135f1bb112SJason A. Donenfeld /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
1145f1bb112SJason A. Donenfeld static void try_to_generate_entropy(void);
1155f1bb112SJason A. Donenfeld 
1165f1bb112SJason A. Donenfeld /*
1175f1bb112SJason A. Donenfeld  * Wait for the input pool to be seeded and thus guaranteed to supply
1185f1bb112SJason A. Donenfeld  * cryptographically secure random numbers. This applies to: the /dev/urandom
1195f1bb112SJason A. Donenfeld  * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
1205f1bb112SJason A. Donenfeld  * family of functions. Using any of these functions without first calling
1215f1bb112SJason A. Donenfeld  * this function forfeits the guarantee of security.
1225f1bb112SJason A. Donenfeld  *
1235f1bb112SJason A. Donenfeld  * Returns: 0 if the input pool has been seeded.
1245f1bb112SJason A. Donenfeld  *          -ERESTARTSYS if the function was interrupted by a signal.
1255f1bb112SJason A. Donenfeld  */
1265f1bb112SJason A. Donenfeld int wait_for_random_bytes(void)
1275f1bb112SJason A. Donenfeld {
1285f1bb112SJason A. Donenfeld 	if (likely(crng_ready()))
1295f1bb112SJason A. Donenfeld 		return 0;
1305f1bb112SJason A. Donenfeld 
1315f1bb112SJason A. Donenfeld 	do {
1325f1bb112SJason A. Donenfeld 		int ret;
1335f1bb112SJason A. Donenfeld 		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
1345f1bb112SJason A. Donenfeld 		if (ret)
1355f1bb112SJason A. Donenfeld 			return ret > 0 ? 0 : ret;
1365f1bb112SJason A. Donenfeld 
1375f1bb112SJason A. Donenfeld 		try_to_generate_entropy();
1385f1bb112SJason A. Donenfeld 	} while (!crng_ready());
1395f1bb112SJason A. Donenfeld 
1405f1bb112SJason A. Donenfeld 	return 0;
1415f1bb112SJason A. Donenfeld }
1425f1bb112SJason A. Donenfeld EXPORT_SYMBOL(wait_for_random_bytes);
1435f1bb112SJason A. Donenfeld 
/*
 * Add a callback function that will be invoked when the input
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	/* Fast path: already seeded, so the callback would never fire. */
	if (crng_ready())
		return err;

	/* Pin the owning module so the queued callback stays valid. */
	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	/* Re-check under the lock: we may have raced with a reseed. */
	if (crng_ready())
		goto out;

	/*
	 * Success: the list entry now owns the module reference taken above,
	 * so clear owner to make the module_put() below a no-op.
	 */
	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	/* Drops the reference only on the lost-race (-EALREADY) path. */
	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);
1825f1bb112SJason A. Donenfeld 
1835f1bb112SJason A. Donenfeld /*
1845f1bb112SJason A. Donenfeld  * Delete a previously registered readiness callback function.
1855f1bb112SJason A. Donenfeld  */
1865f1bb112SJason A. Donenfeld void del_random_ready_callback(struct random_ready_callback *rdy)
1875f1bb112SJason A. Donenfeld {
1885f1bb112SJason A. Donenfeld 	unsigned long flags;
1895f1bb112SJason A. Donenfeld 	struct module *owner = NULL;
1905f1bb112SJason A. Donenfeld 
1915f1bb112SJason A. Donenfeld 	spin_lock_irqsave(&random_ready_list_lock, flags);
1925f1bb112SJason A. Donenfeld 	if (!list_empty(&rdy->list)) {
1935f1bb112SJason A. Donenfeld 		list_del_init(&rdy->list);
1945f1bb112SJason A. Donenfeld 		owner = rdy->owner;
1955f1bb112SJason A. Donenfeld 	}
1965f1bb112SJason A. Donenfeld 	spin_unlock_irqrestore(&random_ready_list_lock, flags);
1975f1bb112SJason A. Donenfeld 
1985f1bb112SJason A. Donenfeld 	module_put(owner);
1995f1bb112SJason A. Donenfeld }
2005f1bb112SJason A. Donenfeld EXPORT_SYMBOL(del_random_ready_callback);
2015f1bb112SJason A. Donenfeld 
/* Invoke and unlink every registered readiness callback; called from
 * crng_reseed() when crng_init first reaches 2. */
static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		/* _init variant so del_random_ready_callback() sees it empty. */
		list_del_init(&rdy->list);
		/* NOTE(review): callback runs with the spinlock held, IRQs off. */
		rdy->func(rdy);
		/* Drop the reference taken in add_random_ready_callback(). */
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}
2175f1bb112SJason A. Donenfeld 
/* Warn when randomness is drawn before the crng is ready. "previous" is a
 * per-call-site cache of the last caller address, used to suppress repeats. */
#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	/* Debug config: never latch, so every new call site warns. */
	const bool print_once = false;
#else
	/* Default: latch after the first warning for the whole boot. */
	static bool print_once __read_mostly;
#endif

	/* Skip if already warned, the RNG is ready, or same caller as last time. */
	if (print_once || crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	/* printk_deferred: safe from contexts where direct printk is not. */
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
				func_name, caller, crng_init);
}
2405f1bb112SJason A. Donenfeld 
2415f1bb112SJason A. Donenfeld 
2423655adc7SJason A. Donenfeld /*********************************************************************
2433655adc7SJason A. Donenfeld  *
2443655adc7SJason A. Donenfeld  * Fast key erasure RNG, the "crng".
2453655adc7SJason A. Donenfeld  *
2463655adc7SJason A. Donenfeld  * These functions expand entropy from the entropy extractor into
2473655adc7SJason A. Donenfeld  * long streams for external consumption using the "fast key erasure"
2483655adc7SJason A. Donenfeld  * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
2493655adc7SJason A. Donenfeld  *
2503655adc7SJason A. Donenfeld  * There are a few exported interfaces for use by other drivers:
2513655adc7SJason A. Donenfeld  *
2523655adc7SJason A. Donenfeld  *	void get_random_bytes(void *buf, size_t nbytes)
2533655adc7SJason A. Donenfeld  *	u32 get_random_u32()
2543655adc7SJason A. Donenfeld  *	u64 get_random_u64()
2553655adc7SJason A. Donenfeld  *	unsigned int get_random_int()
2563655adc7SJason A. Donenfeld  *	unsigned long get_random_long()
2573655adc7SJason A. Donenfeld  *
2583655adc7SJason A. Donenfeld  * These interfaces will return the requested number of random bytes
2593655adc7SJason A. Donenfeld  * into the given buffer or as a return value. This is equivalent to
2603655adc7SJason A. Donenfeld  * a read from /dev/urandom. The integer family of functions may be
2613655adc7SJason A. Donenfeld  * higher performance for one-off random integers, because they do a
2623655adc7SJason A. Donenfeld  * bit of buffering.
2633655adc7SJason A. Donenfeld  *
2643655adc7SJason A. Donenfeld  *********************************************************************/
2653655adc7SJason A. Donenfeld 
enum {
	CRNG_RESEED_INTERVAL = 300 * HZ,	/* Reseed base_crng every 5 minutes. */
	CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE	/* Pre-init bytes before crng_init -> 1. */
};

/*
 * The base RNG: one shared ChaCha key, refreshed by crng_reseed() and
 * guarded by ->lock. ->generation is bumped on every reseed so the
 * per-cpu crngs below know when to re-derive their keys.
 */
static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;		/* jiffies timestamp of the last reseed */
	unsigned long generation;	/* bumped on every reseed; never ULONG_MAX */
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

/* Per-cpu child of base_crng, re-keyed lazily when generations diverge. */
struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	/* ULONG_MAX never matches base_crng.generation, forcing an initial re-key. */
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

/* Used by crng_reseed() to extract a new seed from the input pool. */
static bool drain_entropy(void *buf, size_t nbytes);
2933655adc7SJason A. Donenfeld 
/*
 * This extracts a new crng key from the input pool, but only if there is a
 * sufficient amount of entropy available, in order to mitigate bruteforcing
 * of newly added bits.
 */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];
	bool finalize_init = false;

	/* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
	if (!drain_entropy(key, sizeof(key)))
		return;

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	/* First pool-backed reseed: promote crng_init to its final state 2. */
	if (crng_init < 2) {
		crng_init = 2;
		finalize_init = true;
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
	/* Scrub the stack copy of the key; plain memset could be elided. */
	memzero_explicit(key, sizeof(key));
	/* Wake waiters and run callbacks outside the lock. */
	if (finalize_init) {
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}
3463655adc7SJason A. Donenfeld 
/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	/* State layout: words 0-3 constants, 4-11 key, 12-15 zeroed here. */
	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	/* First half of the block replaces the key: the "fast key erasure". */
	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}
3713655adc7SJason A. Donenfeld 
/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, because
	 * this is what crng_pre_init_inject() mutates during early init.
	 */
	if (unlikely(!crng_ready())) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready)
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		spin_unlock_irqrestore(&base_crng.lock, flags);
		/* If we raced and became ready, fall through to the normal path. */
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is more than 5 minutes old, we reseed, which
	 * in turn bumps the generation counter that we check below.
	 */
	if (unlikely(time_after(jiffies, READ_ONCE(base_crng.birth) + CRNG_RESEED_INTERVAL)))
		crng_reseed();

	/* local_lock with IRQs off: pins us to this CPU's crng. */
	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}
4383655adc7SJason A. Donenfeld 
/*
 * This function is for crng_init == 0 only. It loads entropy directly
 * into the crng's key, without going through the input pool. It is,
 * generally speaking, not very safe, but we use this only at early
 * boot time when it's better to have something there rather than
 * nothing.
 *
 * There are two paths, a slow one and a fast one. The slow one
 * hashes the input along with the current key. The fast one simply
 * xors it in, and should only be used from interrupt context.
 *
 * If account is set, then the crng_init_cnt counter is incremented.
 * This shouldn't be set by functions like add_device_randomness(),
 * where we can't trust the buffer passed to it is guaranteed to be
 * unpredictable (so it might not have any entropy at all).
 *
 * Returns the number of bytes processed from input, which is bounded
 * by CRNG_INIT_CNT_THRESH if account is true.
 */
static size_t crng_pre_init_inject(const void *input, size_t len,
				   bool fast, bool account)
{
	static int crng_init_cnt = 0;
	unsigned long flags;

	if (fast) {
		/* Interrupt context: never spin; drop the sample if contended. */
		if (!spin_trylock_irqsave(&base_crng.lock, flags))
			return 0;
	} else {
		spin_lock_irqsave(&base_crng.lock, flags);
	}

	/* Too late: the pool-backed path has already taken over. */
	if (crng_init != 0) {
		spin_unlock_irqrestore(&base_crng.lock, flags);
		return 0;
	}

	/* Clamp so crng_init_cnt never exceeds CRNG_INIT_CNT_THRESH. */
	if (account)
		len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);

	if (fast) {
		const u8 *src = input;
		size_t i;

		/* Cheap mix: xor the bytes into the key, wrapping around. */
		for (i = 0; i < len; ++i)
			base_crng.key[(crng_init_cnt + i) %
				      sizeof(base_crng.key)] ^= src[i];
	} else {
		struct blake2s_state hash;

		/* Proper mix: new key = BLAKE2s(old key || input). */
		blake2s_init(&hash, sizeof(base_crng.key));
		blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
		blake2s_update(&hash, input, len);
		blake2s_final(&hash, base_crng.key);
	}

	if (account) {
		crng_init_cnt += len;
		/* Enough pre-init bytes: bump generation and enter state 1. */
		if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
			++base_crng.generation;
			crng_init = 1;
		}
	}

	spin_unlock_irqrestore(&base_crng.lock, flags);

	/* NOTE(review): crng_init is re-read here without the lock; appears to
	 * be a tolerated race (worst case a duplicate/missed notice). */
	if (crng_init == 1)
		pr_notice("fast init done\n");

	return len;
}
5103655adc7SJason A. Donenfeld 
/*
 * Fill @buf with @nbytes of cryptographically secure bytes using the fast
 * key erasure RNG. crng_make_state() supplies the first min(32, nbytes)
 * bytes directly and leaves a detached ChaCha state in chacha_state, from
 * which any remaining output is expanded one block at a time.
 */
static void _get_random_bytes(void *buf, size_t nbytes)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t len;

	if (!nbytes)
		return;

	/* Up to the first 32 bytes come straight out of crng_make_state(). */
	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, buf, len);
	nbytes -= len;
	buf += len;

	while (nbytes) {
		if (nbytes < CHACHA_BLOCK_SIZE) {
			/*
			 * Partial final block: generate into a stack buffer,
			 * copy out only what was requested, and wipe the rest.
			 */
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, nbytes);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		/* Block counter in word 12 wrapped to zero; carry into word 13. */
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		nbytes -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	/* Don't leave expansion key material behind on the stack. */
	memzero_explicit(chacha_state, sizeof(chacha_state));
}
5423655adc7SJason A. Donenfeld 
/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, size_t nbytes)
{
	/* Opaque per-function state handed to warn_unseeded_randomness(). */
	static void *previous;

	/* Warn (ratelimited) if we're asked for bytes before the crng is ready. */
	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
5613655adc7SJason A. Donenfeld 
5623655adc7SJason A. Donenfeld static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
5633655adc7SJason A. Donenfeld {
5643655adc7SJason A. Donenfeld 	bool large_request = nbytes > 256;
5653655adc7SJason A. Donenfeld 	ssize_t ret = 0;
5663655adc7SJason A. Donenfeld 	size_t len;
5673655adc7SJason A. Donenfeld 	u32 chacha_state[CHACHA_STATE_WORDS];
5683655adc7SJason A. Donenfeld 	u8 output[CHACHA_BLOCK_SIZE];
5693655adc7SJason A. Donenfeld 
5703655adc7SJason A. Donenfeld 	if (!nbytes)
5713655adc7SJason A. Donenfeld 		return 0;
5723655adc7SJason A. Donenfeld 
5733655adc7SJason A. Donenfeld 	len = min_t(size_t, 32, nbytes);
5743655adc7SJason A. Donenfeld 	crng_make_state(chacha_state, output, len);
5753655adc7SJason A. Donenfeld 
5763655adc7SJason A. Donenfeld 	if (copy_to_user(buf, output, len))
5773655adc7SJason A. Donenfeld 		return -EFAULT;
5783655adc7SJason A. Donenfeld 	nbytes -= len;
5793655adc7SJason A. Donenfeld 	buf += len;
5803655adc7SJason A. Donenfeld 	ret += len;
5813655adc7SJason A. Donenfeld 
5823655adc7SJason A. Donenfeld 	while (nbytes) {
5833655adc7SJason A. Donenfeld 		if (large_request && need_resched()) {
5843655adc7SJason A. Donenfeld 			if (signal_pending(current))
5853655adc7SJason A. Donenfeld 				break;
5863655adc7SJason A. Donenfeld 			schedule();
5873655adc7SJason A. Donenfeld 		}
5883655adc7SJason A. Donenfeld 
5893655adc7SJason A. Donenfeld 		chacha20_block(chacha_state, output);
5903655adc7SJason A. Donenfeld 		if (unlikely(chacha_state[12] == 0))
5913655adc7SJason A. Donenfeld 			++chacha_state[13];
5923655adc7SJason A. Donenfeld 
5933655adc7SJason A. Donenfeld 		len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
5943655adc7SJason A. Donenfeld 		if (copy_to_user(buf, output, len)) {
5953655adc7SJason A. Donenfeld 			ret = -EFAULT;
5963655adc7SJason A. Donenfeld 			break;
5973655adc7SJason A. Donenfeld 		}
5983655adc7SJason A. Donenfeld 
5993655adc7SJason A. Donenfeld 		nbytes -= len;
6003655adc7SJason A. Donenfeld 		buf += len;
6013655adc7SJason A. Donenfeld 		ret += len;
6023655adc7SJason A. Donenfeld 	}
6033655adc7SJason A. Donenfeld 
6043655adc7SJason A. Donenfeld 	memzero_explicit(chacha_state, sizeof(chacha_state));
6053655adc7SJason A. Donenfeld 	memzero_explicit(output, sizeof(output));
6063655adc7SJason A. Donenfeld 	return ret;
6073655adc7SJason A. Donenfeld }
6083655adc7SJason A. Donenfeld 
/*
 * Batched entropy returns random integers. The quality of the random
 * number is good as /dev/urandom. In order to ensure that the randomness
 * provided by this function is okay, the function wait_for_random_bytes()
 * should be called and return 0 at least once at any point prior.
 */
struct batched_entropy {
	union {
		/*
		 * We make this 1.5x a ChaCha block, so that we get the
		 * remaining 32 bytes from fast key erasure, plus one full
		 * block from the detached ChaCha state. We can increase
		 * the size of this later if needed so long as we keep the
		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
		 */
		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
	};
	local_lock_t lock;		/* protects this CPU's batch */
	unsigned long generation;	/* base_crng.generation at last refill */
	unsigned int position;		/* next unconsumed index into the union */
};
6313655adc7SJason A. Donenfeld 
6323655adc7SJason A. Donenfeld 
6333655adc7SJason A. Donenfeld static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
6343655adc7SJason A. Donenfeld 	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
6353655adc7SJason A. Donenfeld 	.position = UINT_MAX
6363655adc7SJason A. Donenfeld };
6373655adc7SJason A. Donenfeld 
6383655adc7SJason A. Donenfeld u64 get_random_u64(void)
6393655adc7SJason A. Donenfeld {
6403655adc7SJason A. Donenfeld 	u64 ret;
6413655adc7SJason A. Donenfeld 	unsigned long flags;
6423655adc7SJason A. Donenfeld 	struct batched_entropy *batch;
6433655adc7SJason A. Donenfeld 	static void *previous;
6443655adc7SJason A. Donenfeld 	unsigned long next_gen;
6453655adc7SJason A. Donenfeld 
6463655adc7SJason A. Donenfeld 	warn_unseeded_randomness(&previous);
6473655adc7SJason A. Donenfeld 
6483655adc7SJason A. Donenfeld 	local_lock_irqsave(&batched_entropy_u64.lock, flags);
6493655adc7SJason A. Donenfeld 	batch = raw_cpu_ptr(&batched_entropy_u64);
6503655adc7SJason A. Donenfeld 
6513655adc7SJason A. Donenfeld 	next_gen = READ_ONCE(base_crng.generation);
6523655adc7SJason A. Donenfeld 	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
6533655adc7SJason A. Donenfeld 	    next_gen != batch->generation) {
6543655adc7SJason A. Donenfeld 		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
6553655adc7SJason A. Donenfeld 		batch->position = 0;
6563655adc7SJason A. Donenfeld 		batch->generation = next_gen;
6573655adc7SJason A. Donenfeld 	}
6583655adc7SJason A. Donenfeld 
6593655adc7SJason A. Donenfeld 	ret = batch->entropy_u64[batch->position];
6603655adc7SJason A. Donenfeld 	batch->entropy_u64[batch->position] = 0;
6613655adc7SJason A. Donenfeld 	++batch->position;
6623655adc7SJason A. Donenfeld 	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
6633655adc7SJason A. Donenfeld 	return ret;
6643655adc7SJason A. Donenfeld }
6653655adc7SJason A. Donenfeld EXPORT_SYMBOL(get_random_u64);
6663655adc7SJason A. Donenfeld 
6673655adc7SJason A. Donenfeld static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
6683655adc7SJason A. Donenfeld 	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
6693655adc7SJason A. Donenfeld 	.position = UINT_MAX
6703655adc7SJason A. Donenfeld };
6713655adc7SJason A. Donenfeld 
6723655adc7SJason A. Donenfeld u32 get_random_u32(void)
6733655adc7SJason A. Donenfeld {
6743655adc7SJason A. Donenfeld 	u32 ret;
6753655adc7SJason A. Donenfeld 	unsigned long flags;
6763655adc7SJason A. Donenfeld 	struct batched_entropy *batch;
6773655adc7SJason A. Donenfeld 	static void *previous;
6783655adc7SJason A. Donenfeld 	unsigned long next_gen;
6793655adc7SJason A. Donenfeld 
6803655adc7SJason A. Donenfeld 	warn_unseeded_randomness(&previous);
6813655adc7SJason A. Donenfeld 
6823655adc7SJason A. Donenfeld 	local_lock_irqsave(&batched_entropy_u32.lock, flags);
6833655adc7SJason A. Donenfeld 	batch = raw_cpu_ptr(&batched_entropy_u32);
6843655adc7SJason A. Donenfeld 
6853655adc7SJason A. Donenfeld 	next_gen = READ_ONCE(base_crng.generation);
6863655adc7SJason A. Donenfeld 	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
6873655adc7SJason A. Donenfeld 	    next_gen != batch->generation) {
6883655adc7SJason A. Donenfeld 		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
6893655adc7SJason A. Donenfeld 		batch->position = 0;
6903655adc7SJason A. Donenfeld 		batch->generation = next_gen;
6913655adc7SJason A. Donenfeld 	}
6923655adc7SJason A. Donenfeld 
6933655adc7SJason A. Donenfeld 	ret = batch->entropy_u32[batch->position];
6943655adc7SJason A. Donenfeld 	batch->entropy_u32[batch->position] = 0;
6953655adc7SJason A. Donenfeld 	++batch->position;
6963655adc7SJason A. Donenfeld 	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
6973655adc7SJason A. Donenfeld 	return ret;
6983655adc7SJason A. Donenfeld }
6993655adc7SJason A. Donenfeld EXPORT_SYMBOL(get_random_u32);
7003655adc7SJason A. Donenfeld 
7013655adc7SJason A. Donenfeld /**
7023655adc7SJason A. Donenfeld  * randomize_page - Generate a random, page aligned address
7033655adc7SJason A. Donenfeld  * @start:	The smallest acceptable address the caller will take.
7043655adc7SJason A. Donenfeld  * @range:	The size of the area, starting at @start, within which the
7053655adc7SJason A. Donenfeld  *		random address must fall.
7063655adc7SJason A. Donenfeld  *
7073655adc7SJason A. Donenfeld  * If @start + @range would overflow, @range is capped.
7083655adc7SJason A. Donenfeld  *
7093655adc7SJason A. Donenfeld  * NOTE: Historical use of randomize_range, which this replaces, presumed that
7103655adc7SJason A. Donenfeld  * @start was already page aligned.  We now align it regardless.
7113655adc7SJason A. Donenfeld  *
7123655adc7SJason A. Donenfeld  * Return: A page aligned address within [start, start + range).  On error,
7133655adc7SJason A. Donenfeld  * @start is returned.
7143655adc7SJason A. Donenfeld  */
7153655adc7SJason A. Donenfeld unsigned long randomize_page(unsigned long start, unsigned long range)
7163655adc7SJason A. Donenfeld {
7173655adc7SJason A. Donenfeld 	if (!PAGE_ALIGNED(start)) {
7183655adc7SJason A. Donenfeld 		range -= PAGE_ALIGN(start) - start;
7193655adc7SJason A. Donenfeld 		start = PAGE_ALIGN(start);
7203655adc7SJason A. Donenfeld 	}
7213655adc7SJason A. Donenfeld 
7223655adc7SJason A. Donenfeld 	if (start > ULONG_MAX - range)
7233655adc7SJason A. Donenfeld 		range = ULONG_MAX - start;
7243655adc7SJason A. Donenfeld 
7253655adc7SJason A. Donenfeld 	range >>= PAGE_SHIFT;
7263655adc7SJason A. Donenfeld 
7273655adc7SJason A. Donenfeld 	if (range == 0)
7283655adc7SJason A. Donenfeld 		return start;
7293655adc7SJason A. Donenfeld 
7303655adc7SJason A. Donenfeld 	return start + (get_random_long() % range << PAGE_SHIFT);
7313655adc7SJason A. Donenfeld }
7323655adc7SJason A. Donenfeld 
7333655adc7SJason A. Donenfeld /*
7343655adc7SJason A. Donenfeld  * This function will use the architecture-specific hardware random
7353655adc7SJason A. Donenfeld  * number generator if it is available. It is not recommended for
7363655adc7SJason A. Donenfeld  * use. Use get_random_bytes() instead. It returns the number of
7373655adc7SJason A. Donenfeld  * bytes filled in.
7383655adc7SJason A. Donenfeld  */
7393655adc7SJason A. Donenfeld size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
7403655adc7SJason A. Donenfeld {
7413655adc7SJason A. Donenfeld 	size_t left = nbytes;
7423655adc7SJason A. Donenfeld 	u8 *p = buf;
7433655adc7SJason A. Donenfeld 
7443655adc7SJason A. Donenfeld 	while (left) {
7453655adc7SJason A. Donenfeld 		unsigned long v;
7463655adc7SJason A. Donenfeld 		size_t chunk = min_t(size_t, left, sizeof(unsigned long));
7473655adc7SJason A. Donenfeld 
7483655adc7SJason A. Donenfeld 		if (!arch_get_random_long(&v))
7493655adc7SJason A. Donenfeld 			break;
7503655adc7SJason A. Donenfeld 
7513655adc7SJason A. Donenfeld 		memcpy(p, &v, chunk);
7523655adc7SJason A. Donenfeld 		p += chunk;
7533655adc7SJason A. Donenfeld 		left -= chunk;
7543655adc7SJason A. Donenfeld 	}
7553655adc7SJason A. Donenfeld 
7563655adc7SJason A. Donenfeld 	return nbytes - left;
7573655adc7SJason A. Donenfeld }
7583655adc7SJason A. Donenfeld EXPORT_SYMBOL(get_random_bytes_arch);
7593655adc7SJason A. Donenfeld 
760a5ed7cb1SJason A. Donenfeld 
761a5ed7cb1SJason A. Donenfeld /**********************************************************************
762a5ed7cb1SJason A. Donenfeld  *
763a5ed7cb1SJason A. Donenfeld  * Entropy accumulation and extraction routines.
764a5ed7cb1SJason A. Donenfeld  *
765a5ed7cb1SJason A. Donenfeld  * Callers may add entropy via:
766a5ed7cb1SJason A. Donenfeld  *
767a5ed7cb1SJason A. Donenfeld  *     static void mix_pool_bytes(const void *in, size_t nbytes)
768a5ed7cb1SJason A. Donenfeld  *
769a5ed7cb1SJason A. Donenfeld  * After which, if added entropy should be credited:
770a5ed7cb1SJason A. Donenfeld  *
771a5ed7cb1SJason A. Donenfeld  *     static void credit_entropy_bits(size_t nbits)
772a5ed7cb1SJason A. Donenfeld  *
773a5ed7cb1SJason A. Donenfeld  * Finally, extract entropy via these two, with the latter one
774a5ed7cb1SJason A. Donenfeld  * setting the entropy count to zero and extracting only if there
775a5ed7cb1SJason A. Donenfeld  * is POOL_MIN_BITS entropy credited prior:
776a5ed7cb1SJason A. Donenfeld  *
777a5ed7cb1SJason A. Donenfeld  *     static void extract_entropy(void *buf, size_t nbytes)
778a5ed7cb1SJason A. Donenfeld  *     static bool drain_entropy(void *buf, size_t nbytes)
779a5ed7cb1SJason A. Donenfeld  *
780a5ed7cb1SJason A. Donenfeld  **********************************************************************/
781a5ed7cb1SJason A. Donenfeld 
enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
};

/* For notifying userspace that it should write into /dev/random. */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);

/*
 * The input pool: a BLAKE2s hash state into which all collected entropy
 * is mixed, plus a running count of how many of those input bits have
 * been credited as genuine entropy (capped at POOL_BITS by
 * credit_entropy_bits()).
 */
static struct {
	struct blake2s_state hash;
	spinlock_t lock;		/* serializes updates to hash */
	unsigned int entropy_count;	/* credited bits, updated locklessly via cmpxchg */
} input_pool = {
	/* Statically spelled-out unkeyed BLAKE2s init: IV0 xored with the parameter block. */
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
8011da177e4SLinus Torvalds 
/*
 * Mix @nbytes of @in into the pool hash. Callers either hold
 * input_pool.lock (see mix_pool_bytes()) or run before concurrency is
 * possible (see rand_initialize()).
 */
static void _mix_pool_bytes(const void *in, size_t nbytes)
{
	blake2s_update(&input_pool.hash, in, nbytes);
}
80690ed1e67SJason A. Donenfeld 
/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate. Unlike _mix_pool_bytes(),
 * this variant takes the pool lock itself.
 */
static void mix_pool_bytes(const void *in, size_t nbytes)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(in, nbytes);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
8201da177e4SLinus Torvalds 
/*
 * Credit the pool with @nbits of entropy, saturating at POOL_BITS.
 * Once the pool reaches POOL_MIN_BITS while the crng is not yet fully
 * initialized (crng_init < 2), reseed the crng from the pool.
 */
static void credit_entropy_bits(size_t nbits)
{
	unsigned int entropy_count, orig, add;

	if (!nbits)
		return;

	add = min_t(size_t, nbits, POOL_BITS);

	/* Lockless saturating add; retry if another CPU raced the update. */
	do {
		orig = READ_ONCE(input_pool.entropy_count);
		entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);

	if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
		crng_reseed();
}
838a5ed7cb1SJason A. Donenfeld 
/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block. Concretely:
 *
 *	seed     = HASHPRF(last_key, entropy_input)
 *	next_key = HASHPRF(seed, RDSEED || 0)	(re-keys the pool hash)
 *	output_i = HASHPRF(seed, RDSEED || i)	for i = 1, 2, ...
 */
static void extract_entropy(void *buf, size_t nbytes)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];	/* 32 bytes of arch seed material */
		size_t counter;
	} block;
	size_t i;

	/* Gather fresh arch seed words; fall back to a timestamp per word. */
	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0); the pool hash is re-keyed with it. */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (nbytes) {
		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		nbytes -= i;
		buf += i;
	}

	/* Wipe all remaining key material from the stack. */
	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}
884a5ed7cb1SJason A. Donenfeld 
/*
 * First we make sure we have POOL_MIN_BITS of entropy in the pool, and then we
 * set the entropy count to zero (but don't actually touch any data). Only then
 * can we extract a new key with extract_entropy(). Returns true if extraction
 * happened, false if there wasn't enough credited entropy.
 */
static bool drain_entropy(void *buf, size_t nbytes)
{
	unsigned int entropy_count;
	/* Atomically claim the entire credit; retry if another CPU raced us. */
	do {
		entropy_count = READ_ONCE(input_pool.entropy_count);
		if (entropy_count < POOL_MIN_BITS)
			return false;
	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
	extract_entropy(buf, nbytes);
	/* The credit is now zero; tell writers to /dev/random it wants more. */
	wake_up_interruptible(&random_write_wait);
	kill_fasync(&fasync, SIGIO, POLL_OUT);
	return true;
}
903a5ed7cb1SJason A. Donenfeld 
90492c653cfSJason A. Donenfeld 
90592c653cfSJason A. Donenfeld /**********************************************************************
90692c653cfSJason A. Donenfeld  *
90792c653cfSJason A. Donenfeld  * Entropy collection routines.
90892c653cfSJason A. Donenfeld  *
90992c653cfSJason A. Donenfeld  * The following exported functions are used for pushing entropy into
91092c653cfSJason A. Donenfeld  * the above entropy accumulation routines:
91192c653cfSJason A. Donenfeld  *
91292c653cfSJason A. Donenfeld  *	void add_device_randomness(const void *buf, size_t size);
91392c653cfSJason A. Donenfeld  *	void add_input_randomness(unsigned int type, unsigned int code,
91492c653cfSJason A. Donenfeld  *	                          unsigned int value);
91592c653cfSJason A. Donenfeld  *	void add_disk_randomness(struct gendisk *disk);
91692c653cfSJason A. Donenfeld  *	void add_hwgenerator_randomness(const void *buffer, size_t count,
91792c653cfSJason A. Donenfeld  *					size_t entropy);
91892c653cfSJason A. Donenfeld  *	void add_bootloader_randomness(const void *buf, size_t size);
91992c653cfSJason A. Donenfeld  *	void add_interrupt_randomness(int irq);
92092c653cfSJason A. Donenfeld  *
92192c653cfSJason A. Donenfeld  * add_device_randomness() adds data to the input pool that
92292c653cfSJason A. Donenfeld  * is likely to differ between two devices (or possibly even per boot).
92392c653cfSJason A. Donenfeld  * This would be things like MAC addresses or serial numbers, or the
92492c653cfSJason A. Donenfeld  * read-out of the RTC. This does *not* credit any actual entropy to
92592c653cfSJason A. Donenfeld  * the pool, but it initializes the pool to different values for devices
92692c653cfSJason A. Donenfeld  * that might otherwise be identical and have very little entropy
92792c653cfSJason A. Donenfeld  * available to them (particularly common in the embedded world).
92892c653cfSJason A. Donenfeld  *
92992c653cfSJason A. Donenfeld  * add_input_randomness() uses the input layer interrupt timing, as well
93092c653cfSJason A. Donenfeld  * as the event type information from the hardware.
93192c653cfSJason A. Donenfeld  *
93292c653cfSJason A. Donenfeld  * add_disk_randomness() uses what amounts to the seek time of block
93392c653cfSJason A. Donenfeld  * layer request events, on a per-disk_devt basis, as input to the
93492c653cfSJason A. Donenfeld  * entropy pool. Note that high-speed solid state drives with very low
93592c653cfSJason A. Donenfeld  * seek times do not make for good sources of entropy, as their seek
93692c653cfSJason A. Donenfeld  * times are usually fairly consistent.
93792c653cfSJason A. Donenfeld  *
93892c653cfSJason A. Donenfeld  * The above two routines try to estimate how many bits of entropy
93992c653cfSJason A. Donenfeld  * to credit. They do this by keeping track of the first and second
94092c653cfSJason A. Donenfeld  * order deltas of the event timings.
94192c653cfSJason A. Donenfeld  *
94292c653cfSJason A. Donenfeld  * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
94392c653cfSJason A. Donenfeld  * entropy as specified by the caller. If the entropy pool is full it will
94492c653cfSJason A. Donenfeld  * block until more entropy is needed.
94592c653cfSJason A. Donenfeld  *
94692c653cfSJason A. Donenfeld  * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
94792c653cfSJason A. Donenfeld  * add_device_randomness(), depending on whether or not the configuration
94892c653cfSJason A. Donenfeld  * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
94992c653cfSJason A. Donenfeld  *
95092c653cfSJason A. Donenfeld  * add_interrupt_randomness() uses the interrupt timing as random
95192c653cfSJason A. Donenfeld  * inputs to the entropy pool. Using the cycle counters and the irq source
95292c653cfSJason A. Donenfeld  * as inputs, it feeds the input pool roughly once a second or after 64
95392c653cfSJason A. Donenfeld  * interrupts, crediting 1 bit of entropy for whichever comes first.
95492c653cfSJason A. Donenfeld  *
95592c653cfSJason A. Donenfeld  **********************************************************************/
95692c653cfSJason A. Donenfeld 
/*
 * Whether arch-provided seed material gathered at boot is trusted to fully
 * initialize the crng (see rand_initialize()). Defaults to
 * CONFIG_RANDOM_TRUST_CPU; overridable with random.trust_cpu= on the
 * kernel command line.
 */
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);
963775f4b29STheodore Ts'o 
/*
 * The first collection of entropy occurs at system boot while interrupts
 * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
 * Depending on the above configuration knob, RDSEED may be considered
 * sufficient for initialization. Note that much earlier setup may already
 * have pushed entropy into the input pool by the time we get here.
 */
int __init rand_initialize(void)
{
	size_t i;
	ktime_t now = ktime_get_real();
	bool arch_init = true;
	unsigned long rv;

	/* Mix in one BLAKE2s block's worth of arch-provided seed words. */
	for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			/* Fall back to a timestamp; don't count it as arch entropy. */
			rv = random_get_entropy();
			arch_init = false;
		}
		_mix_pool_bytes(&rv, sizeof(rv));
	}
	/* Boot time and utsname() differentiate otherwise-identical machines. */
	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(utsname(), sizeof(*(utsname())));

	/* Seed the base crng from whatever the pool holds so far. */
	extract_entropy(base_crng.key, sizeof(base_crng.key));
	++base_crng.generation;

	/* If every word came from the arch RNG and we trust it, declare the crng ready. */
	if (arch_init && trust_cpu && crng_init < 2) {
		crng_init = 2;
		pr_notice("crng init done (trusting CPU's manufacturer)\n");
	}

	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}
10031da177e4SLinus Torvalds 
/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;		/* timestamp of the previous event */
	long last_delta, last_delta2;	/* first- and second-order timing deltas */
};
10091da177e4SLinus Torvalds 
/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	/* Before the crng is ready, also feed the data to it directly (uncredited). */
	if (!crng_ready() && size)
		crng_pre_init_inject(buf, size, false, false);

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, size);
	_mix_pool_bytes(&time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
1032a2080a67SLinus Torvalds 
/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 *
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	struct {
		long jiffies;
		unsigned int cycles;
		unsigned int num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	mix_pool_bytes(&sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 *
	 * The READ_ONCE/WRITE_ONCE pairs tolerate lockless concurrent
	 * callers updating the same state.
	 */
	delta = sample.jiffies - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, sample.jiffies);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit entropy estimate to 11 bits.
	 */
	credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
}
10891da177e4SLinus Torvalds 
1090d251575aSStephen Hemminger void add_input_randomness(unsigned int type, unsigned int code,
10911da177e4SLinus Torvalds 			  unsigned int value)
10921da177e4SLinus Torvalds {
10931da177e4SLinus Torvalds 	static unsigned char last_value;
109492c653cfSJason A. Donenfeld 	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
10951da177e4SLinus Torvalds 
109692c653cfSJason A. Donenfeld 	/* Ignore autorepeat and the like. */
10971da177e4SLinus Torvalds 	if (value == last_value)
10981da177e4SLinus Torvalds 		return;
10991da177e4SLinus Torvalds 
11001da177e4SLinus Torvalds 	last_value = value;
11011da177e4SLinus Torvalds 	add_timer_randomness(&input_timer_state,
11021da177e4SLinus Torvalds 			     (type << 4) ^ code ^ (code >> 4) ^ value);
11031da177e4SLinus Torvalds }
110480fc9f53SDmitry Torokhov EXPORT_SYMBOL_GPL(add_input_randomness);
11051da177e4SLinus Torvalds 
110692c653cfSJason A. Donenfeld #ifdef CONFIG_BLOCK
110792c653cfSJason A. Donenfeld void add_disk_randomness(struct gendisk *disk)
110892c653cfSJason A. Donenfeld {
110992c653cfSJason A. Donenfeld 	if (!disk || !disk->random)
111092c653cfSJason A. Donenfeld 		return;
111192c653cfSJason A. Donenfeld 	/* First major is 1, so we get >= 0x200 here. */
111292c653cfSJason A. Donenfeld 	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
111392c653cfSJason A. Donenfeld }
111492c653cfSJason A. Donenfeld EXPORT_SYMBOL_GPL(add_disk_randomness);
111592c653cfSJason A. Donenfeld 
111692c653cfSJason A. Donenfeld void rand_initialize_disk(struct gendisk *disk)
111792c653cfSJason A. Donenfeld {
111892c653cfSJason A. Donenfeld 	struct timer_rand_state *state;
111992c653cfSJason A. Donenfeld 
112092c653cfSJason A. Donenfeld 	/*
112192c653cfSJason A. Donenfeld 	 * If kzalloc returns null, we just won't use that entropy
112292c653cfSJason A. Donenfeld 	 * source.
112392c653cfSJason A. Donenfeld 	 */
112492c653cfSJason A. Donenfeld 	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
112592c653cfSJason A. Donenfeld 	if (state) {
112692c653cfSJason A. Donenfeld 		state->last_time = INITIAL_JIFFIES;
112792c653cfSJason A. Donenfeld 		disk->random = state;
112892c653cfSJason A. Donenfeld 	}
112992c653cfSJason A. Donenfeld }
113092c653cfSJason A. Donenfeld #endif
113192c653cfSJason A. Donenfeld 
113292c653cfSJason A. Donenfeld /*
113392c653cfSJason A. Donenfeld  * Interface for in-kernel drivers of true hardware RNGs.
113492c653cfSJason A. Donenfeld  * Those devices may produce endless random bits and will be throttled
113592c653cfSJason A. Donenfeld  * when our pool is full.
113692c653cfSJason A. Donenfeld  */
void add_hwgenerator_randomness(const void *buffer, size_t count,
				size_t entropy)
{
	if (unlikely(crng_init == 0)) {
		/*
		 * Before the crng is initialized, feed bytes to the pre-init
		 * injector first. Whatever it consumed is also mixed into the
		 * input pool; any leftover falls through to the normal path.
		 */
		size_t ret = crng_pre_init_inject(buffer, count, false, true);
		mix_pool_bytes(buffer, ret);
		count -= ret;
		buffer += ret;
		/* Stop if everything was consumed or init did not advance. */
		if (!count || crng_init == 0)
			return;
	}

	/*
	 * Throttle writing if we're above the trickle threshold.
	 * We'll be woken up again once below POOL_MIN_BITS, when
	 * the calling thread is about to terminate, or once
	 * CRNG_RESEED_INTERVAL has elapsed.
	 */
	wait_event_interruptible_timeout(random_write_wait,
			!system_wq || kthread_should_stop() ||
			input_pool.entropy_count < POOL_MIN_BITS,
			CRNG_RESEED_INTERVAL);
	mix_pool_bytes(buffer, count);
	credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
116392c653cfSJason A. Donenfeld 
116492c653cfSJason A. Donenfeld /*
116592c653cfSJason A. Donenfeld  * Handle random seed passed by bootloader.
116692c653cfSJason A. Donenfeld  * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
116792c653cfSJason A. Donenfeld  * it would be regarded as device data.
116892c653cfSJason A. Donenfeld  * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
116992c653cfSJason A. Donenfeld  */
117092c653cfSJason A. Donenfeld void add_bootloader_randomness(const void *buf, size_t size)
117192c653cfSJason A. Donenfeld {
117292c653cfSJason A. Donenfeld 	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
117392c653cfSJason A. Donenfeld 		add_hwgenerator_randomness(buf, size, size * 8);
117492c653cfSJason A. Donenfeld 	else
117592c653cfSJason A. Donenfeld 		add_device_randomness(buf, size);
117692c653cfSJason A. Donenfeld }
117792c653cfSJason A. Donenfeld EXPORT_SYMBOL_GPL(add_bootloader_randomness);
117892c653cfSJason A. Donenfeld 
struct fast_pool {
	union {
		u32 pool32[4];	/* the 128-bit pool viewed as 32-bit words */
		u64 pool64[2];	/* same storage viewed as 64-bit words */
	};
	struct work_struct mix;	/* deferred dump of this pool into the input pool */
	unsigned long last;	/* jiffies when the pool was last drained */
	atomic_t count;		/* event count; top bit = mix work in flight */
	u16 reg_idx;		/* cursor into pt_regs words for get_reg() */
};
118992c653cfSJason A. Donenfeld 
119092c653cfSJason A. Donenfeld /*
119192c653cfSJason A. Donenfeld  * This is a fast mixing routine used by the interrupt randomness
119292c653cfSJason A. Donenfeld  * collector. It's hardcoded for an 128 bit pool and assumes that any
119392c653cfSJason A. Donenfeld  * locks that might be needed are taken by the caller.
119492c653cfSJason A. Donenfeld  */
119592c653cfSJason A. Donenfeld static void fast_mix(u32 pool[4])
119692c653cfSJason A. Donenfeld {
119792c653cfSJason A. Donenfeld 	u32 a = pool[0],	b = pool[1];
119892c653cfSJason A. Donenfeld 	u32 c = pool[2],	d = pool[3];
119992c653cfSJason A. Donenfeld 
120092c653cfSJason A. Donenfeld 	a += b;			c += d;
120192c653cfSJason A. Donenfeld 	b = rol32(b, 6);	d = rol32(d, 27);
120292c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
120392c653cfSJason A. Donenfeld 
120492c653cfSJason A. Donenfeld 	a += b;			c += d;
120592c653cfSJason A. Donenfeld 	b = rol32(b, 16);	d = rol32(d, 14);
120692c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
120792c653cfSJason A. Donenfeld 
120892c653cfSJason A. Donenfeld 	a += b;			c += d;
120992c653cfSJason A. Donenfeld 	b = rol32(b, 6);	d = rol32(d, 27);
121092c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
121192c653cfSJason A. Donenfeld 
121292c653cfSJason A. Donenfeld 	a += b;			c += d;
121392c653cfSJason A. Donenfeld 	b = rol32(b, 16);	d = rol32(d, 14);
121492c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
121592c653cfSJason A. Donenfeld 
121692c653cfSJason A. Donenfeld 	pool[0] = a;  pool[1] = b;
121792c653cfSJason A. Donenfeld 	pool[2] = c;  pool[3] = d;
121892c653cfSJason A. Donenfeld }
121992c653cfSJason A. Donenfeld 
/* Per-CPU fast pool that accumulates entropy from interrupt timing. */
static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

/*
 * Scrape one 32-bit word out of the saved interrupt register frame,
 * advancing a per-pool cursor on each call so successive calls return
 * different words. Used as a fallback when no cycle counter is available.
 */
static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	u32 *ptr = (u32 *)regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	/* READ_ONCE/WRITE_ONCE: reg_idx is accessed without a lock. */
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}
1236ee3e00e9STheodore Ts'o 
/*
 * Workqueue handler: folds a CPU's fast pool into the input pool and
 * credits one bit of entropy for the batch.
 */
static void mix_interrupt_randomness(struct work_struct *work)
{
	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
	u32 pool[4];

	/* Check to see if we're running on the wrong CPU due to hotplug. */
	local_irq_disable();
	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
		local_irq_enable();
		/*
		 * If we are unlucky enough to have been moved to another CPU,
		 * during CPU hotplug while the CPU was shutdown then we set
		 * our count to zero atomically so that when the CPU comes
		 * back online, it can enqueue work again. The _release here
		 * pairs with the atomic_inc_return_acquire in
		 * add_interrupt_randomness().
		 */
		atomic_set_release(&fast_pool->count, 0);
		return;
	}

	/*
	 * Copy the pool to the stack so that the mixer always has a
	 * consistent view, before we reenable irqs again.
	 */
	memcpy(pool, fast_pool->pool32, sizeof(pool));
	atomic_set(&fast_pool->count, 0);
	fast_pool->last = jiffies;
	local_irq_enable();

	mix_pool_bytes(pool, sizeof(pool));
	credit_entropy_bits(1);
	/* Wipe the stack copy; it held not-yet-public pool contents. */
	memzero_explicit(pool, sizeof(pool));
}
127158340f8eSJason A. Donenfeld 
/*
 * Called from hard irq context: stirs interrupt timing (cycle counter,
 * jiffies, irq number, instruction pointer) into this CPU's fast pool,
 * and periodically schedules work to dump the pool into the input pool.
 */
void add_interrupt_randomness(int irq)
{
	enum { MIX_INFLIGHT = 1U << 31 };
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	unsigned int new_count;

	/* No cycle counter? Fall back to scraping a saved register. */
	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);

	/* XOR the timing data into the 128-bit fast pool. */
	if (sizeof(cycles) == 8)
		fast_pool->pool64[0] ^= cycles ^ rol64(now, 32) ^ irq;
	else {
		fast_pool->pool32[0] ^= cycles ^ irq;
		fast_pool->pool32[1] ^= now;
	}

	/* Also mix in the interrupted instruction pointer (or our own). */
	if (sizeof(unsigned long) == 8)
		fast_pool->pool64[1] ^= regs ? instruction_pointer(regs) : _RET_IP_;
	else {
		fast_pool->pool32[2] ^= regs ? instruction_pointer(regs) : _RET_IP_;
		fast_pool->pool32[3] ^= get_reg(fast_pool, regs);
	}

	fast_mix(fast_pool->pool32);
	/* The _acquire here pairs with the atomic_set_release in mix_interrupt_randomness(). */
	new_count = (unsigned int)atomic_inc_return_acquire(&fast_pool->count);

	if (unlikely(crng_init == 0)) {
		/* Pre-init: hand batches of 64 events straight to the crng. */
		if (new_count >= 64 &&
		    crng_pre_init_inject(fast_pool->pool32, sizeof(fast_pool->pool32),
					 true, true) > 0) {
			atomic_set(&fast_pool->count, 0);
			fast_pool->last = now;
			/* Best effort only: never spin on the pool lock in irq. */
			if (spin_trylock(&input_pool.lock)) {
				_mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32));
				spin_unlock(&input_pool.lock);
			}
		}
		return;
	}

	/* A mix worker is already queued for this pool. */
	if (new_count & MIX_INFLIGHT)
		return;

	/* Defer mixing until 64 events or one second have accumulated. */
	if (new_count < 64 && !time_after(now, fast_pool->last + HZ))
		return;

	if (unlikely(!fast_pool->mix.func))
		INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
	atomic_or(MIX_INFLIGHT, &fast_pool->count);
	queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
13281da177e4SLinus Torvalds 
13291da177e4SLinus Torvalds /*
133050ee7529SLinus Torvalds  * Each time the timer fires, we expect that we got an unpredictable
133150ee7529SLinus Torvalds  * jump in the cycle counter. Even if the timer is running on another
133250ee7529SLinus Torvalds  * CPU, the timer activity will be touching the stack of the CPU that is
133350ee7529SLinus Torvalds  * generating entropy..
133450ee7529SLinus Torvalds  *
133550ee7529SLinus Torvalds  * Note that we don't re-arm the timer in the timer itself - we are
133650ee7529SLinus Torvalds  * happy to be scheduled away, since that just makes the load more
133750ee7529SLinus Torvalds  * complex, but we do not want the timer to keep ticking unless the
133850ee7529SLinus Torvalds  * entropy loop is running.
133950ee7529SLinus Torvalds  *
134050ee7529SLinus Torvalds  * So the re-arming always happens in the entropy loop itself.
134150ee7529SLinus Torvalds  */
static void entropy_timer(struct timer_list *t)
{
	/* Each firing of the jitter timer is credited as one bit. */
	credit_entropy_bits(1);
}
134650ee7529SLinus Torvalds 
134750ee7529SLinus Torvalds /*
134850ee7529SLinus Torvalds  * If we have an actual cycle counter, see if we can
134950ee7529SLinus Torvalds  * generate enough entropy with timing noise
135050ee7529SLinus Torvalds  */
static void try_to_generate_entropy(void)
{
	/* Timer and sample live on our stack for the duration of the loop. */
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		/* Keep the timer armed one jiffy ahead of us. */
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	/* The timer references on-stack state; stop it before returning. */
	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.now, sizeof(stack.now));
}
137750ee7529SLinus Torvalds 
1378a6adf8e7SJason A. Donenfeld 
1379a6adf8e7SJason A. Donenfeld /**********************************************************************
1380a6adf8e7SJason A. Donenfeld  *
1381a6adf8e7SJason A. Donenfeld  * Userspace reader/writer interfaces.
1382a6adf8e7SJason A. Donenfeld  *
1383a6adf8e7SJason A. Donenfeld  * getrandom(2) is the primary modern interface into the RNG and should
1384a6adf8e7SJason A. Donenfeld  * be used in preference to anything else.
1385a6adf8e7SJason A. Donenfeld  *
1386a6adf8e7SJason A. Donenfeld  * Reading from /dev/random has the same functionality as calling
1387a6adf8e7SJason A. Donenfeld  * getrandom(2) with flags=0. In earlier versions, however, it had
1388a6adf8e7SJason A. Donenfeld  * vastly different semantics and should therefore be avoided, to
1389a6adf8e7SJason A. Donenfeld  * prevent backwards compatibility issues.
1390a6adf8e7SJason A. Donenfeld  *
1391a6adf8e7SJason A. Donenfeld  * Reading from /dev/urandom has the same functionality as calling
1392a6adf8e7SJason A. Donenfeld  * getrandom(2) with flags=GRND_INSECURE. Because it does not block
1393a6adf8e7SJason A. Donenfeld  * waiting for the RNG to be ready, it should not be used.
1394a6adf8e7SJason A. Donenfeld  *
1395a6adf8e7SJason A. Donenfeld  * Writing to either /dev/random or /dev/urandom adds entropy to
1396a6adf8e7SJason A. Donenfeld  * the input pool but does not credit it.
1397a6adf8e7SJason A. Donenfeld  *
1398a6adf8e7SJason A. Donenfeld  * Polling on /dev/random indicates when the RNG is initialized, on
1399a6adf8e7SJason A. Donenfeld  * the read side, and when it wants new entropy, on the write side.
1400a6adf8e7SJason A. Donenfeld  *
1401a6adf8e7SJason A. Donenfeld  * Both /dev/random and /dev/urandom have the same set of ioctls for
1402a6adf8e7SJason A. Donenfeld  * adding entropy, getting the entropy count, zeroing the count, and
1403a6adf8e7SJason A. Donenfeld  * reseeding the crng.
1404a6adf8e7SJason A. Donenfeld  *
1405a6adf8e7SJason A. Donenfeld  **********************************************************************/
1406a6adf8e7SJason A. Donenfeld 
SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
		flags)
{
	/* Reject any flags we do not know about. */
	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	/* Clamp, rather than fail, oversized requests. */
	if (count > INT_MAX)
		count = INT_MAX;

	/* Unless insecure output was asked for, wait for the crng to seed. */
	if (!(flags & GRND_INSECURE) && !crng_ready()) {
		int ret;

		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return get_random_bytes_user(buf, count);
}
143430c08efeSAndy Lutomirski 
1435248045b8SJason A. Donenfeld static __poll_t random_poll(struct file *file, poll_table *wait)
143689b310a2SChristoph Hellwig {
1437a11e1d43SLinus Torvalds 	__poll_t mask;
143889b310a2SChristoph Hellwig 
143930c08efeSAndy Lutomirski 	poll_wait(file, &crng_init_wait, wait);
1440a11e1d43SLinus Torvalds 	poll_wait(file, &random_write_wait, wait);
1441a11e1d43SLinus Torvalds 	mask = 0;
144230c08efeSAndy Lutomirski 	if (crng_ready())
1443a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
1444489c7fc4SJason A. Donenfeld 	if (input_pool.entropy_count < POOL_MIN_BITS)
1445a9a08845SLinus Torvalds 		mask |= EPOLLOUT | EPOLLWRNORM;
14461da177e4SLinus Torvalds 	return mask;
14471da177e4SLinus Torvalds }
14481da177e4SLinus Torvalds 
144904ec96b7SJason A. Donenfeld static int write_pool(const char __user *ubuf, size_t count)
14507f397dcdSMatt Mackall {
145104ec96b7SJason A. Donenfeld 	size_t len;
14527b5164fbSJason A. Donenfeld 	int ret = 0;
145304ec96b7SJason A. Donenfeld 	u8 block[BLAKE2S_BLOCK_SIZE];
14547f397dcdSMatt Mackall 
145504ec96b7SJason A. Donenfeld 	while (count) {
145604ec96b7SJason A. Donenfeld 		len = min(count, sizeof(block));
14577b5164fbSJason A. Donenfeld 		if (copy_from_user(block, ubuf, len)) {
14587b5164fbSJason A. Donenfeld 			ret = -EFAULT;
14597b5164fbSJason A. Donenfeld 			goto out;
14607b5164fbSJason A. Donenfeld 		}
146104ec96b7SJason A. Donenfeld 		count -= len;
146204ec96b7SJason A. Donenfeld 		ubuf += len;
146304ec96b7SJason A. Donenfeld 		mix_pool_bytes(block, len);
146491f3f1e3SMatt Mackall 		cond_resched();
14657f397dcdSMatt Mackall 	}
14667f397dcdSMatt Mackall 
14677b5164fbSJason A. Donenfeld out:
14687b5164fbSJason A. Donenfeld 	memzero_explicit(block, sizeof(block));
14697b5164fbSJason A. Donenfeld 	return ret;
14707f397dcdSMatt Mackall }
14717f397dcdSMatt Mackall 
147290b75ee5SMatt Mackall static ssize_t random_write(struct file *file, const char __user *buffer,
14731da177e4SLinus Torvalds 			    size_t count, loff_t *ppos)
14741da177e4SLinus Torvalds {
147504ec96b7SJason A. Donenfeld 	int ret;
14767f397dcdSMatt Mackall 
147790ed1e67SJason A. Donenfeld 	ret = write_pool(buffer, count);
14787f397dcdSMatt Mackall 	if (ret)
14797f397dcdSMatt Mackall 		return ret;
14807f397dcdSMatt Mackall 
14817f397dcdSMatt Mackall 	return (ssize_t)count;
14821da177e4SLinus Torvalds }
14831da177e4SLinus Torvalds 
1484a6adf8e7SJason A. Donenfeld static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
1485a6adf8e7SJason A. Donenfeld 			    loff_t *ppos)
1486a6adf8e7SJason A. Donenfeld {
1487a6adf8e7SJason A. Donenfeld 	static int maxwarn = 10;
1488a6adf8e7SJason A. Donenfeld 
1489a6adf8e7SJason A. Donenfeld 	if (!crng_ready() && maxwarn > 0) {
1490a6adf8e7SJason A. Donenfeld 		maxwarn--;
1491a6adf8e7SJason A. Donenfeld 		if (__ratelimit(&urandom_warning))
1492a6adf8e7SJason A. Donenfeld 			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
1493a6adf8e7SJason A. Donenfeld 				  current->comm, nbytes);
1494a6adf8e7SJason A. Donenfeld 	}
1495a6adf8e7SJason A. Donenfeld 
1496a6adf8e7SJason A. Donenfeld 	return get_random_bytes_user(buf, nbytes);
1497a6adf8e7SJason A. Donenfeld }
1498a6adf8e7SJason A. Donenfeld 
1499a6adf8e7SJason A. Donenfeld static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
1500a6adf8e7SJason A. Donenfeld 			   loff_t *ppos)
1501a6adf8e7SJason A. Donenfeld {
1502a6adf8e7SJason A. Donenfeld 	int ret;
1503a6adf8e7SJason A. Donenfeld 
1504a6adf8e7SJason A. Donenfeld 	ret = wait_for_random_bytes();
1505a6adf8e7SJason A. Donenfeld 	if (ret != 0)
1506a6adf8e7SJason A. Donenfeld 		return ret;
1507a6adf8e7SJason A. Donenfeld 	return get_random_bytes_user(buf, nbytes);
1508a6adf8e7SJason A. Donenfeld }
1509a6adf8e7SJason A. Donenfeld 
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking. */
		if (put_user(input_pool.entropy_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		/* Credit entropy without adding any data; privileged. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDADDENTROPY:
		/* Mix in user data and credit it; privileged. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool((const char __user *)p, size);
		if (retval < 0)
			return retval;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters. We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (xchg(&input_pool.entropy_count, 0)) {
			/* The count dropped; let writers know we want more. */
			wake_up_interruptible(&random_write_wait);
			kill_fasync(&fasync, SIGIO, POLL_OUT);
		}
		return 0;
	case RNDRESEEDCRNG:
		/* Force an immediate crng reseed; privileged. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}
15691da177e4SLinus Torvalds 
/* Support SIGIO-based async notification on the random devices. */
static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}
15749a6f70bbSJeff Dike 
/* /dev/random: reads wait for the crng to be seeded. */
const struct file_operations random_fops = {
	.read = random_read,
	.write = random_write,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};
15841da177e4SLinus Torvalds 
/* /dev/urandom: reads never block, even before the crng is seeded. */
const struct file_operations urandom_fops = {
	.read = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};
15931da177e4SLinus Torvalds 
15940deff3c4SJason A. Donenfeld 
15951da177e4SLinus Torvalds /********************************************************************
15961da177e4SLinus Torvalds  *
15970deff3c4SJason A. Donenfeld  * Sysctl interface.
15980deff3c4SJason A. Donenfeld  *
15990deff3c4SJason A. Donenfeld  * These are partly unused legacy knobs with dummy values to not break
16000deff3c4SJason A. Donenfeld  * userspace and partly still useful things. They are usually accessible
16010deff3c4SJason A. Donenfeld  * in /proc/sys/kernel/random/ and are as follows:
16020deff3c4SJason A. Donenfeld  *
16030deff3c4SJason A. Donenfeld  * - boot_id - a UUID representing the current boot.
16040deff3c4SJason A. Donenfeld  *
16050deff3c4SJason A. Donenfeld  * - uuid - a random UUID, different each time the file is read.
16060deff3c4SJason A. Donenfeld  *
16070deff3c4SJason A. Donenfeld  * - poolsize - the number of bits of entropy that the input pool can
16080deff3c4SJason A. Donenfeld  *   hold, tied to the POOL_BITS constant.
16090deff3c4SJason A. Donenfeld  *
16100deff3c4SJason A. Donenfeld  * - entropy_avail - the number of bits of entropy currently in the
16110deff3c4SJason A. Donenfeld  *   input pool. Always <= poolsize.
16120deff3c4SJason A. Donenfeld  *
16130deff3c4SJason A. Donenfeld  * - write_wakeup_threshold - the amount of entropy in the input pool
16140deff3c4SJason A. Donenfeld  *   below which write polls to /dev/random will unblock, requesting
16150deff3c4SJason A. Donenfeld  *   more entropy, tied to the POOL_MIN_BITS constant. It is writable
16160deff3c4SJason A. Donenfeld  *   to avoid breaking old userspaces, but writing to it does not
16170deff3c4SJason A. Donenfeld  *   change any behavior of the RNG.
16180deff3c4SJason A. Donenfeld  *
16190deff3c4SJason A. Donenfeld  * - urandom_min_reseed_secs - fixed to the meaningless value "60".
16200deff3c4SJason A. Donenfeld  *   It is writable to avoid breaking old userspaces, but writing
16210deff3c4SJason A. Donenfeld  *   to it does not change any behavior of the RNG.
16221da177e4SLinus Torvalds  *
16231da177e4SLinus Torvalds  ********************************************************************/
16241da177e4SLinus Torvalds 
16251da177e4SLinus Torvalds #ifdef CONFIG_SYSCTL
16261da177e4SLinus Torvalds 
16271da177e4SLinus Torvalds #include <linux/sysctl.h>
16281da177e4SLinus Torvalds 
16290deff3c4SJason A. Donenfeld static int sysctl_random_min_urandom_seed = 60;
16300deff3c4SJason A. Donenfeld static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
1631489c7fc4SJason A. Donenfeld static int sysctl_poolsize = POOL_BITS;
16321da177e4SLinus Torvalds static char sysctl_bootid[16];
16331da177e4SLinus Torvalds 
16341da177e4SLinus Torvalds /*
1635f22052b2SGreg Price  * This function is used to return both the bootid UUID, and random
16361da177e4SLinus Torvalds  * UUID.  The difference is in whether table->data is NULL; if it is,
16371da177e4SLinus Torvalds  * then a new UUID is generated and returned to the user.
16381da177e4SLinus Torvalds  *
1639f22052b2SGreg Price  * If the user accesses this via the proc interface, the UUID will be
1640f22052b2SGreg Price  * returned as an ASCII string in the standard UUID format; if via the
1641f22052b2SGreg Price  * sysctl system call, as 16 bytes of binary data.
16421da177e4SLinus Torvalds  */
1643248045b8SJason A. Donenfeld static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
1644248045b8SJason A. Donenfeld 			size_t *lenp, loff_t *ppos)
16451da177e4SLinus Torvalds {
1646a151427eSJoe Perches 	struct ctl_table fake_table;
16471da177e4SLinus Torvalds 	unsigned char buf[64], tmp_uuid[16], *uuid;
16481da177e4SLinus Torvalds 
16491da177e4SLinus Torvalds 	uuid = table->data;
16501da177e4SLinus Torvalds 	if (!uuid) {
16511da177e4SLinus Torvalds 		uuid = tmp_uuid;
16521da177e4SLinus Torvalds 		generate_random_uuid(uuid);
165344e4360fSMathieu Desnoyers 	} else {
165444e4360fSMathieu Desnoyers 		static DEFINE_SPINLOCK(bootid_spinlock);
165544e4360fSMathieu Desnoyers 
165644e4360fSMathieu Desnoyers 		spin_lock(&bootid_spinlock);
165744e4360fSMathieu Desnoyers 		if (!uuid[8])
165844e4360fSMathieu Desnoyers 			generate_random_uuid(uuid);
165944e4360fSMathieu Desnoyers 		spin_unlock(&bootid_spinlock);
166044e4360fSMathieu Desnoyers 	}
16611da177e4SLinus Torvalds 
166235900771SJoe Perches 	sprintf(buf, "%pU", uuid);
166335900771SJoe Perches 
16641da177e4SLinus Torvalds 	fake_table.data = buf;
16651da177e4SLinus Torvalds 	fake_table.maxlen = sizeof(buf);
16661da177e4SLinus Torvalds 
16678d65af78SAlexey Dobriyan 	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
16681da177e4SLinus Torvalds }
16691da177e4SLinus Torvalds 
16705475e8f0SXiaoming Ni static struct ctl_table random_table[] = {
16711da177e4SLinus Torvalds 	{
16721da177e4SLinus Torvalds 		.procname	= "poolsize",
16731da177e4SLinus Torvalds 		.data		= &sysctl_poolsize,
16741da177e4SLinus Torvalds 		.maxlen		= sizeof(int),
16751da177e4SLinus Torvalds 		.mode		= 0444,
16766d456111SEric W. Biederman 		.proc_handler	= proc_dointvec,
16771da177e4SLinus Torvalds 	},
16781da177e4SLinus Torvalds 	{
16791da177e4SLinus Torvalds 		.procname	= "entropy_avail",
1680c5704490SJason A. Donenfeld 		.data		= &input_pool.entropy_count,
16811da177e4SLinus Torvalds 		.maxlen		= sizeof(int),
16821da177e4SLinus Torvalds 		.mode		= 0444,
1683c5704490SJason A. Donenfeld 		.proc_handler	= proc_dointvec,
16841da177e4SLinus Torvalds 	},
16851da177e4SLinus Torvalds 	{
16861da177e4SLinus Torvalds 		.procname	= "write_wakeup_threshold",
16870deff3c4SJason A. Donenfeld 		.data		= &sysctl_random_write_wakeup_bits,
16881da177e4SLinus Torvalds 		.maxlen		= sizeof(int),
16891da177e4SLinus Torvalds 		.mode		= 0644,
1690489c7fc4SJason A. Donenfeld 		.proc_handler	= proc_dointvec,
16911da177e4SLinus Torvalds 	},
16921da177e4SLinus Torvalds 	{
1693f5c2742cSTheodore Ts'o 		.procname	= "urandom_min_reseed_secs",
16940deff3c4SJason A. Donenfeld 		.data		= &sysctl_random_min_urandom_seed,
1695f5c2742cSTheodore Ts'o 		.maxlen		= sizeof(int),
1696f5c2742cSTheodore Ts'o 		.mode		= 0644,
1697f5c2742cSTheodore Ts'o 		.proc_handler	= proc_dointvec,
1698f5c2742cSTheodore Ts'o 	},
1699f5c2742cSTheodore Ts'o 	{
17001da177e4SLinus Torvalds 		.procname	= "boot_id",
17011da177e4SLinus Torvalds 		.data		= &sysctl_bootid,
17021da177e4SLinus Torvalds 		.maxlen		= 16,
17031da177e4SLinus Torvalds 		.mode		= 0444,
17046d456111SEric W. Biederman 		.proc_handler	= proc_do_uuid,
17051da177e4SLinus Torvalds 	},
17061da177e4SLinus Torvalds 	{
17071da177e4SLinus Torvalds 		.procname	= "uuid",
17081da177e4SLinus Torvalds 		.maxlen		= 16,
17091da177e4SLinus Torvalds 		.mode		= 0444,
17106d456111SEric W. Biederman 		.proc_handler	= proc_do_uuid,
17111da177e4SLinus Torvalds 	},
1712894d2491SEric W. Biederman 	{ }
17131da177e4SLinus Torvalds };
17145475e8f0SXiaoming Ni 
17155475e8f0SXiaoming Ni /*
17165475e8f0SXiaoming Ni  * rand_initialize() is called before sysctl_init(),
17175475e8f0SXiaoming Ni  * so we cannot call register_sysctl_init() in rand_initialize()
17185475e8f0SXiaoming Ni  */
17195475e8f0SXiaoming Ni static int __init random_sysctls_init(void)
17205475e8f0SXiaoming Ni {
17215475e8f0SXiaoming Ni 	register_sysctl_init("kernel/random", random_table);
17225475e8f0SXiaoming Ni 	return 0;
17235475e8f0SXiaoming Ni }
17245475e8f0SXiaoming Ni device_initcall(random_sysctls_init);
17250deff3c4SJason A. Donenfeld #endif
1726