xref: /linux/drivers/char/random.c (revision afba0b80b977b2a8f16234f2acd982f82710ba33)
1a07fdae3SJason A. Donenfeld // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
21da177e4SLinus Torvalds /*
39f9eff85SJason A. Donenfeld  * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
49e95ce27SMatt Mackall  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
55f75d9f3SJason A. Donenfeld  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
61da177e4SLinus Torvalds  *
75f75d9f3SJason A. Donenfeld  * This driver produces cryptographically secure pseudorandom data. It is divided
85f75d9f3SJason A. Donenfeld  * into roughly six sections, each with a section header:
91da177e4SLinus Torvalds  *
105f75d9f3SJason A. Donenfeld  *   - Initialization and readiness waiting.
115f75d9f3SJason A. Donenfeld  *   - Fast key erasure RNG, the "crng".
125f75d9f3SJason A. Donenfeld  *   - Entropy accumulation and extraction routines.
135f75d9f3SJason A. Donenfeld  *   - Entropy collection routines.
145f75d9f3SJason A. Donenfeld  *   - Userspace reader/writer interfaces.
155f75d9f3SJason A. Donenfeld  *   - Sysctl interface.
161da177e4SLinus Torvalds  *
175f75d9f3SJason A. Donenfeld  * The high level overview is that there is one input pool, into which
185f75d9f3SJason A. Donenfeld  * various pieces of data are hashed. Some of that data is then "credited" as
195f75d9f3SJason A. Donenfeld  * having a certain number of bits of entropy. When enough bits of entropy are
205f75d9f3SJason A. Donenfeld  * available, the hash is finalized and handed as a key to a stream cipher that
215f75d9f3SJason A. Donenfeld  * expands it indefinitely for various consumers. This key is periodically
225f75d9f3SJason A. Donenfeld  * refreshed as the various entropy collectors, described below, add data to the
235f75d9f3SJason A. Donenfeld  * input pool and credit it. There is currently no Fortuna-like scheduler
245f75d9f3SJason A. Donenfeld  * involved, which can lead to malicious entropy sources causing a premature
255f75d9f3SJason A. Donenfeld  * reseed, and the entropy estimates are, at best, conservative guesses.
261da177e4SLinus Torvalds  */
271da177e4SLinus Torvalds 
2812cd53afSYangtao Li #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2912cd53afSYangtao Li 
301da177e4SLinus Torvalds #include <linux/utsname.h>
311da177e4SLinus Torvalds #include <linux/module.h>
321da177e4SLinus Torvalds #include <linux/kernel.h>
331da177e4SLinus Torvalds #include <linux/major.h>
341da177e4SLinus Torvalds #include <linux/string.h>
351da177e4SLinus Torvalds #include <linux/fcntl.h>
361da177e4SLinus Torvalds #include <linux/slab.h>
371da177e4SLinus Torvalds #include <linux/random.h>
381da177e4SLinus Torvalds #include <linux/poll.h>
391da177e4SLinus Torvalds #include <linux/init.h>
401da177e4SLinus Torvalds #include <linux/fs.h>
411da177e4SLinus Torvalds #include <linux/genhd.h>
421da177e4SLinus Torvalds #include <linux/interrupt.h>
4327ac792cSAndrea Righi #include <linux/mm.h>
44dd0f0cf5SMichael Ellerman #include <linux/nodemask.h>
451da177e4SLinus Torvalds #include <linux/spinlock.h>
46c84dbf61STorsten Duwe #include <linux/kthread.h>
471da177e4SLinus Torvalds #include <linux/percpu.h>
48775f4b29STheodore Ts'o #include <linux/ptrace.h>
496265e169STheodore Ts'o #include <linux/workqueue.h>
50d178a1ebSYinghai Lu #include <linux/irq.h>
514e00b339STheodore Ts'o #include <linux/ratelimit.h>
52c6e9d6f3STheodore Ts'o #include <linux/syscalls.h>
53c6e9d6f3STheodore Ts'o #include <linux/completion.h>
548da4b8c4SAndy Shevchenko #include <linux/uuid.h>
5587e7d5abSJason A. Donenfeld #include <linux/uaccess.h>
561ca1b917SEric Biggers #include <crypto/chacha.h>
579f9eff85SJason A. Donenfeld #include <crypto/blake2s.h>
581da177e4SLinus Torvalds #include <asm/processor.h>
591da177e4SLinus Torvalds #include <asm/irq.h>
60775f4b29STheodore Ts'o #include <asm/irq_regs.h>
611da177e4SLinus Torvalds #include <asm/io.h>
621da177e4SLinus Torvalds 
635f1bb112SJason A. Donenfeld /*********************************************************************
645f1bb112SJason A. Donenfeld  *
655f1bb112SJason A. Donenfeld  * Initialization and readiness waiting.
665f1bb112SJason A. Donenfeld  *
675f1bb112SJason A. Donenfeld  * Much of the RNG infrastructure is devoted to various dependencies
685f1bb112SJason A. Donenfeld  * being able to wait until the RNG has collected enough entropy and
695f1bb112SJason A. Donenfeld  * is ready for safe consumption.
705f1bb112SJason A. Donenfeld  *
715f1bb112SJason A. Donenfeld  *********************************************************************/
725f1bb112SJason A. Donenfeld 
735f1bb112SJason A. Donenfeld /*
745f1bb112SJason A. Donenfeld  * crng_init =  0 --> Uninitialized
755f1bb112SJason A. Donenfeld  *		1 --> Initialized
765f1bb112SJason A. Donenfeld  *		2 --> Initialized from input_pool
775f1bb112SJason A. Donenfeld  *
785f1bb112SJason A. Donenfeld  * crng_init is protected by base_crng->lock, and only increases
795f1bb112SJason A. Donenfeld  * its value (from 0->1->2).
805f1bb112SJason A. Donenfeld  */
815f1bb112SJason A. Donenfeld static int crng_init = 0;
825f1bb112SJason A. Donenfeld #define crng_ready() (likely(crng_init > 1))
835f1bb112SJason A. Donenfeld /* Various types of waiters for crng_init->2 transition. */
845f1bb112SJason A. Donenfeld static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
855f1bb112SJason A. Donenfeld static struct fasync_struct *fasync;
865f1bb112SJason A. Donenfeld static DEFINE_SPINLOCK(random_ready_list_lock);
875f1bb112SJason A. Donenfeld static LIST_HEAD(random_ready_list);
885f1bb112SJason A. Donenfeld 
895f1bb112SJason A. Donenfeld /* Control how we warn userspace. */
905f1bb112SJason A. Donenfeld static struct ratelimit_state unseeded_warning =
915f1bb112SJason A. Donenfeld 	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
925f1bb112SJason A. Donenfeld static struct ratelimit_state urandom_warning =
935f1bb112SJason A. Donenfeld 	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
945f1bb112SJason A. Donenfeld static int ratelimit_disable __read_mostly;
955f1bb112SJason A. Donenfeld module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
965f1bb112SJason A. Donenfeld MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
975f1bb112SJason A. Donenfeld 
985f1bb112SJason A. Donenfeld /*
995f1bb112SJason A. Donenfeld  * Returns whether or not the input pool has been seeded and thus guaranteed
1005f1bb112SJason A. Donenfeld  * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.
1035f1bb112SJason A. Donenfeld  *
1045f1bb112SJason A. Donenfeld  * Returns: true if the input pool has been seeded.
1055f1bb112SJason A. Donenfeld  *          false if the input pool has not been seeded.
1065f1bb112SJason A. Donenfeld  */
1075f1bb112SJason A. Donenfeld bool rng_is_initialized(void)
1085f1bb112SJason A. Donenfeld {
1095f1bb112SJason A. Donenfeld 	return crng_ready();
1105f1bb112SJason A. Donenfeld }
1115f1bb112SJason A. Donenfeld EXPORT_SYMBOL(rng_is_initialized);
1125f1bb112SJason A. Donenfeld 
1135f1bb112SJason A. Donenfeld /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
1145f1bb112SJason A. Donenfeld static void try_to_generate_entropy(void);
1155f1bb112SJason A. Donenfeld 
1165f1bb112SJason A. Donenfeld /*
1175f1bb112SJason A. Donenfeld  * Wait for the input pool to be seeded and thus guaranteed to supply
1185f1bb112SJason A. Donenfeld  * cryptographically secure random numbers. This applies to: the /dev/urandom
1195f1bb112SJason A. Donenfeld  * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
1205f1bb112SJason A. Donenfeld  * family of functions. Using any of these functions without first calling
1215f1bb112SJason A. Donenfeld  * this function forfeits the guarantee of security.
1225f1bb112SJason A. Donenfeld  *
1235f1bb112SJason A. Donenfeld  * Returns: 0 if the input pool has been seeded.
1245f1bb112SJason A. Donenfeld  *          -ERESTARTSYS if the function was interrupted by a signal.
1255f1bb112SJason A. Donenfeld  */
1265f1bb112SJason A. Donenfeld int wait_for_random_bytes(void)
1275f1bb112SJason A. Donenfeld {
1285f1bb112SJason A. Donenfeld 	if (likely(crng_ready()))
1295f1bb112SJason A. Donenfeld 		return 0;
1305f1bb112SJason A. Donenfeld 
1315f1bb112SJason A. Donenfeld 	do {
1325f1bb112SJason A. Donenfeld 		int ret;
1335f1bb112SJason A. Donenfeld 		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
1345f1bb112SJason A. Donenfeld 		if (ret)
1355f1bb112SJason A. Donenfeld 			return ret > 0 ? 0 : ret;
1365f1bb112SJason A. Donenfeld 
1375f1bb112SJason A. Donenfeld 		try_to_generate_entropy();
1385f1bb112SJason A. Donenfeld 	} while (!crng_ready());
1395f1bb112SJason A. Donenfeld 
1405f1bb112SJason A. Donenfeld 	return 0;
1415f1bb112SJason A. Donenfeld }
1425f1bb112SJason A. Donenfeld EXPORT_SYMBOL(wait_for_random_bytes);
1435f1bb112SJason A. Donenfeld 
1445f1bb112SJason A. Donenfeld /*
1455f1bb112SJason A. Donenfeld  * Add a callback function that will be invoked when the input
1465f1bb112SJason A. Donenfeld  * pool is initialised.
1475f1bb112SJason A. Donenfeld  *
1485f1bb112SJason A. Donenfeld  * returns: 0 if callback is successfully added
1495f1bb112SJason A. Donenfeld  *	    -EALREADY if pool is already initialised (callback not called)
1505f1bb112SJason A. Donenfeld  *	    -ENOENT if module for callback is not alive
1515f1bb112SJason A. Donenfeld  */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	/* Already seeded: the callback would never fire, so tell the caller. */
	if (crng_ready())
		return err;

	/* Pin the callback owner's module so it can't unload underneath us. */
	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	/* Re-check under the lock: we may have raced with crng_reseed(). */
	if (crng_ready())
		goto out;

	/*
	 * Success path keeps the module reference (it is dropped later by
	 * del_random_ready_callback() or process_random_ready_list()).
	 * NULL makes the module_put() below a no-op.
	 */
	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	/* Drops the reference only on the raced -EALREADY path. */
	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);
1825f1bb112SJason A. Donenfeld 
1835f1bb112SJason A. Donenfeld /*
1845f1bb112SJason A. Donenfeld  * Delete a previously registered readiness callback function.
1855f1bb112SJason A. Donenfeld  */
1865f1bb112SJason A. Donenfeld void del_random_ready_callback(struct random_ready_callback *rdy)
1875f1bb112SJason A. Donenfeld {
1885f1bb112SJason A. Donenfeld 	unsigned long flags;
1895f1bb112SJason A. Donenfeld 	struct module *owner = NULL;
1905f1bb112SJason A. Donenfeld 
1915f1bb112SJason A. Donenfeld 	spin_lock_irqsave(&random_ready_list_lock, flags);
1925f1bb112SJason A. Donenfeld 	if (!list_empty(&rdy->list)) {
1935f1bb112SJason A. Donenfeld 		list_del_init(&rdy->list);
1945f1bb112SJason A. Donenfeld 		owner = rdy->owner;
1955f1bb112SJason A. Donenfeld 	}
1965f1bb112SJason A. Donenfeld 	spin_unlock_irqrestore(&random_ready_list_lock, flags);
1975f1bb112SJason A. Donenfeld 
1985f1bb112SJason A. Donenfeld 	module_put(owner);
1995f1bb112SJason A. Donenfeld }
2005f1bb112SJason A. Donenfeld EXPORT_SYMBOL(del_random_ready_callback);
2015f1bb112SJason A. Donenfeld 
2025f1bb112SJason A. Donenfeld static void process_random_ready_list(void)
2035f1bb112SJason A. Donenfeld {
2045f1bb112SJason A. Donenfeld 	unsigned long flags;
2055f1bb112SJason A. Donenfeld 	struct random_ready_callback *rdy, *tmp;
2065f1bb112SJason A. Donenfeld 
2075f1bb112SJason A. Donenfeld 	spin_lock_irqsave(&random_ready_list_lock, flags);
2085f1bb112SJason A. Donenfeld 	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
2095f1bb112SJason A. Donenfeld 		struct module *owner = rdy->owner;
2105f1bb112SJason A. Donenfeld 
2115f1bb112SJason A. Donenfeld 		list_del_init(&rdy->list);
2125f1bb112SJason A. Donenfeld 		rdy->func(rdy);
2135f1bb112SJason A. Donenfeld 		module_put(owner);
2145f1bb112SJason A. Donenfeld 	}
2155f1bb112SJason A. Donenfeld 	spin_unlock_irqrestore(&random_ready_list_lock, flags);
2165f1bb112SJason A. Donenfeld }
2175f1bb112SJason A. Donenfeld 
2185f1bb112SJason A. Donenfeld #define warn_unseeded_randomness(previous) \
2195f1bb112SJason A. Donenfeld 	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
2205f1bb112SJason A. Donenfeld 
2215f1bb112SJason A. Donenfeld static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
2225f1bb112SJason A. Donenfeld {
2235f1bb112SJason A. Donenfeld #ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
2245f1bb112SJason A. Donenfeld 	const bool print_once = false;
2255f1bb112SJason A. Donenfeld #else
2265f1bb112SJason A. Donenfeld 	static bool print_once __read_mostly;
2275f1bb112SJason A. Donenfeld #endif
2285f1bb112SJason A. Donenfeld 
2295f1bb112SJason A. Donenfeld 	if (print_once || crng_ready() ||
2305f1bb112SJason A. Donenfeld 	    (previous && (caller == READ_ONCE(*previous))))
2315f1bb112SJason A. Donenfeld 		return;
2325f1bb112SJason A. Donenfeld 	WRITE_ONCE(*previous, caller);
2335f1bb112SJason A. Donenfeld #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
2345f1bb112SJason A. Donenfeld 	print_once = true;
2355f1bb112SJason A. Donenfeld #endif
2365f1bb112SJason A. Donenfeld 	if (__ratelimit(&unseeded_warning))
2375f1bb112SJason A. Donenfeld 		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
2385f1bb112SJason A. Donenfeld 				func_name, caller, crng_init);
2395f1bb112SJason A. Donenfeld }
2405f1bb112SJason A. Donenfeld 
2415f1bb112SJason A. Donenfeld 
2423655adc7SJason A. Donenfeld /*********************************************************************
2433655adc7SJason A. Donenfeld  *
2443655adc7SJason A. Donenfeld  * Fast key erasure RNG, the "crng".
2453655adc7SJason A. Donenfeld  *
2463655adc7SJason A. Donenfeld  * These functions expand entropy from the entropy extractor into
2473655adc7SJason A. Donenfeld  * long streams for external consumption using the "fast key erasure"
2483655adc7SJason A. Donenfeld  * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
2493655adc7SJason A. Donenfeld  *
2503655adc7SJason A. Donenfeld  * There are a few exported interfaces for use by other drivers:
2513655adc7SJason A. Donenfeld  *
2523655adc7SJason A. Donenfeld  *	void get_random_bytes(void *buf, size_t nbytes)
2533655adc7SJason A. Donenfeld  *	u32 get_random_u32()
2543655adc7SJason A. Donenfeld  *	u64 get_random_u64()
2553655adc7SJason A. Donenfeld  *	unsigned int get_random_int()
2563655adc7SJason A. Donenfeld  *	unsigned long get_random_long()
2573655adc7SJason A. Donenfeld  *
2583655adc7SJason A. Donenfeld  * These interfaces will return the requested number of random bytes
2593655adc7SJason A. Donenfeld  * into the given buffer or as a return value. This is equivalent to
2603655adc7SJason A. Donenfeld  * a read from /dev/urandom. The integer family of functions may be
2613655adc7SJason A. Donenfeld  * higher performance for one-off random integers, because they do a
2623655adc7SJason A. Donenfeld  * bit of buffering.
2633655adc7SJason A. Donenfeld  *
2643655adc7SJason A. Donenfeld  *********************************************************************/
2653655adc7SJason A. Donenfeld 
2663655adc7SJason A. Donenfeld enum {
2673655adc7SJason A. Donenfeld 	CRNG_RESEED_INTERVAL = 300 * HZ,
2683655adc7SJason A. Donenfeld 	CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
2693655adc7SJason A. Donenfeld };
2703655adc7SJason A. Donenfeld 
2713655adc7SJason A. Donenfeld static struct {
2723655adc7SJason A. Donenfeld 	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
2733655adc7SJason A. Donenfeld 	unsigned long birth;
2743655adc7SJason A. Donenfeld 	unsigned long generation;
2753655adc7SJason A. Donenfeld 	spinlock_t lock;
2763655adc7SJason A. Donenfeld } base_crng = {
2773655adc7SJason A. Donenfeld 	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
2783655adc7SJason A. Donenfeld };
2793655adc7SJason A. Donenfeld 
2803655adc7SJason A. Donenfeld struct crng {
2813655adc7SJason A. Donenfeld 	u8 key[CHACHA_KEY_SIZE];
2823655adc7SJason A. Donenfeld 	unsigned long generation;
2833655adc7SJason A. Donenfeld 	local_lock_t lock;
2843655adc7SJason A. Donenfeld };
2853655adc7SJason A. Donenfeld 
2863655adc7SJason A. Donenfeld static DEFINE_PER_CPU(struct crng, crngs) = {
2873655adc7SJason A. Donenfeld 	.generation = ULONG_MAX,
2883655adc7SJason A. Donenfeld 	.lock = INIT_LOCAL_LOCK(crngs.lock),
2893655adc7SJason A. Donenfeld };
2903655adc7SJason A. Donenfeld 
2913655adc7SJason A. Donenfeld /* Used by crng_reseed() to extract a new seed from the input pool. */
2923655adc7SJason A. Donenfeld static bool drain_entropy(void *buf, size_t nbytes);
2933655adc7SJason A. Donenfeld 
2943655adc7SJason A. Donenfeld /*
2953655adc7SJason A. Donenfeld  * This extracts a new crng key from the input pool, but only if there is a
2963655adc7SJason A. Donenfeld  * sufficient amount of entropy available, in order to mitigate bruteforcing
2973655adc7SJason A. Donenfeld  * of newly added bits.
2983655adc7SJason A. Donenfeld  */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];
	bool finalize_init = false;

	/* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
	if (!drain_entropy(key, sizeof(key)))
		return;

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	/*
	 * First reseed from the input pool: advance crng_init to its final
	 * state (2). crng_init is protected by base_crng.lock and only
	 * ever increases, so this transition happens exactly once.
	 */
	if (crng_init < 2) {
		crng_init = 2;
		finalize_init = true;
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
	/* Wipe the stack copy of the key now that base_crng holds it. */
	memzero_explicit(key, sizeof(key));
	/*
	 * Notifications are done outside the lock: callbacks and wakeups
	 * may take other locks or otherwise do heavier work.
	 */
	if (finalize_init) {
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		/* Report warnings that ratelimiting suppressed before seeding. */
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}
3463655adc7SJason A. Donenfeld 
3473655adc7SJason A. Donenfeld /*
3483655adc7SJason A. Donenfeld  * This generates a ChaCha block using the provided key, and then
3493655adc7SJason A. Donenfeld  * immediately overwites that key with half the block. It returns
 * immediately overwrites that key with half the block. It returns
3513655adc7SJason A. Donenfeld  * half of the block containing 32 bytes of random data that may
3523655adc7SJason A. Donenfeld  * be used; random_data_len may not be greater than 32.
3533655adc7SJason A. Donenfeld  */
3543655adc7SJason A. Donenfeld static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
3553655adc7SJason A. Donenfeld 				  u32 chacha_state[CHACHA_STATE_WORDS],
3563655adc7SJason A. Donenfeld 				  u8 *random_data, size_t random_data_len)
3573655adc7SJason A. Donenfeld {
3583655adc7SJason A. Donenfeld 	u8 first_block[CHACHA_BLOCK_SIZE];
3593655adc7SJason A. Donenfeld 
3603655adc7SJason A. Donenfeld 	BUG_ON(random_data_len > 32);
3613655adc7SJason A. Donenfeld 
3623655adc7SJason A. Donenfeld 	chacha_init_consts(chacha_state);
3633655adc7SJason A. Donenfeld 	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
3643655adc7SJason A. Donenfeld 	memset(&chacha_state[12], 0, sizeof(u32) * 4);
3653655adc7SJason A. Donenfeld 	chacha20_block(chacha_state, first_block);
3663655adc7SJason A. Donenfeld 
3673655adc7SJason A. Donenfeld 	memcpy(key, first_block, CHACHA_KEY_SIZE);
3683655adc7SJason A. Donenfeld 	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
3693655adc7SJason A. Donenfeld 	memzero_explicit(first_block, sizeof(first_block));
3703655adc7SJason A. Donenfeld }
3713655adc7SJason A. Donenfeld 
3723655adc7SJason A. Donenfeld /*
3733655adc7SJason A. Donenfeld  * This function returns a ChaCha state that you may use for generating
3743655adc7SJason A. Donenfeld  * random data. It also returns up to 32 bytes on its own of random data
3753655adc7SJason A. Donenfeld  * that may be used; random_data_len may not be greater than 32.
3763655adc7SJason A. Donenfeld  */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	/* At most 32 direct bytes; larger requests use the returned state. */
	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, because
	 * this is what crng_{fast,slow}_load mutate during early init.
	 */
	if (unlikely(!crng_ready())) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready)
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
		/* Became ready while waiting for the lock: fall through. */
	}

	/*
	 * If the base_crng is more than 5 minutes old, we reseed, which
	 * in turn bumps the generation counter that we check below.
	 */
	if (unlikely(time_after(jiffies, READ_ONCE(base_crng.birth) + CRNG_RESEED_INTERVAL)))
		crng_reseed();

	/* Pin to this CPU's crng for the remainder of the function. */
	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}
4383655adc7SJason A. Donenfeld 
4393655adc7SJason A. Donenfeld /*
4403655adc7SJason A. Donenfeld  * This function is for crng_init == 0 only.
4413655adc7SJason A. Donenfeld  *
4423655adc7SJason A. Donenfeld  * crng_fast_load() can be called by code in the interrupt service
4433655adc7SJason A. Donenfeld  * path.  So we can't afford to dilly-dally. Returns the number of
4443655adc7SJason A. Donenfeld  * bytes processed from cp.
4453655adc7SJason A. Donenfeld  */
static size_t crng_fast_load(const void *cp, size_t len)
{
	/* Total bytes mixed in across all calls; protected by base_crng.lock. */
	static int crng_init_cnt = 0;
	unsigned long flags;
	const u8 *src = (const u8 *)cp;
	size_t ret = 0;

	/* We may be in IRQ context: never spin on the lock, just drop the bytes. */
	if (!spin_trylock_irqsave(&base_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&base_crng.lock, flags);
		return 0;
	}
	/* XOR input bytes into the key, cycling over it, until the threshold. */
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		base_crng.key[crng_init_cnt % sizeof(base_crng.key)] ^= *src;
		src++; crng_init_cnt++; len--; ret++;
	}
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		/* Bump the generation so per-cpu crngs pick up the new key. */
		++base_crng.generation;
		crng_init = 1;
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
	/* crng_init only ever increases, so reading it post-unlock is fine here. */
	if (crng_init == 1)
		pr_notice("fast init done\n");
	return ret;
}
4723655adc7SJason A. Donenfeld 
4733655adc7SJason A. Donenfeld /*
4743655adc7SJason A. Donenfeld  * This function is for crng_init == 0 only.
4753655adc7SJason A. Donenfeld  *
4763655adc7SJason A. Donenfeld  * crng_slow_load() is called by add_device_randomness, which has two
4773655adc7SJason A. Donenfeld  * attributes.  (1) We can't trust the buffer passed to it is
4783655adc7SJason A. Donenfeld  * guaranteed to be unpredictable (so it might not have any entropy at
4793655adc7SJason A. Donenfeld  * all), and (2) it doesn't have the performance constraints of
4803655adc7SJason A. Donenfeld  * crng_fast_load().
4813655adc7SJason A. Donenfeld  *
4823655adc7SJason A. Donenfeld  * So, we simply hash the contents in with the current key. Finally,
4833655adc7SJason A. Donenfeld  * we do *not* advance crng_init_cnt since buffer we may get may be
4843655adc7SJason A. Donenfeld  * something like a fixed DMI table (for example), which might very
4853655adc7SJason A. Donenfeld  * well be unique to the machine, but is otherwise unvarying.
4863655adc7SJason A. Donenfeld  */
4873655adc7SJason A. Donenfeld static void crng_slow_load(const void *cp, size_t len)
4883655adc7SJason A. Donenfeld {
4893655adc7SJason A. Donenfeld 	unsigned long flags;
4903655adc7SJason A. Donenfeld 	struct blake2s_state hash;
4913655adc7SJason A. Donenfeld 
4923655adc7SJason A. Donenfeld 	blake2s_init(&hash, sizeof(base_crng.key));
4933655adc7SJason A. Donenfeld 
4943655adc7SJason A. Donenfeld 	if (!spin_trylock_irqsave(&base_crng.lock, flags))
4953655adc7SJason A. Donenfeld 		return;
4963655adc7SJason A. Donenfeld 	if (crng_init != 0) {
4973655adc7SJason A. Donenfeld 		spin_unlock_irqrestore(&base_crng.lock, flags);
4983655adc7SJason A. Donenfeld 		return;
4993655adc7SJason A. Donenfeld 	}
5003655adc7SJason A. Donenfeld 
5013655adc7SJason A. Donenfeld 	blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
5023655adc7SJason A. Donenfeld 	blake2s_update(&hash, cp, len);
5033655adc7SJason A. Donenfeld 	blake2s_final(&hash, base_crng.key);
5043655adc7SJason A. Donenfeld 
5053655adc7SJason A. Donenfeld 	spin_unlock_irqrestore(&base_crng.lock, flags);
5063655adc7SJason A. Donenfeld }
5073655adc7SJason A. Donenfeld 
static void _get_random_bytes(void *buf, size_t nbytes)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t len;

	if (!nbytes)
		return;

	/* The first up-to-32 bytes come directly from fast key erasure. */
	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, buf, len);
	nbytes -= len;
	buf += len;

	/* Generate the remainder block-by-block from the returned ChaCha state. */
	while (nbytes) {
		if (nbytes < CHACHA_BLOCK_SIZE) {
			/* Partial final block: stage it in tmp, then wipe tmp. */
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, nbytes);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		/*
		 * chacha20_block() advances the 32-bit counter in word 12;
		 * on wrap-around, carry into word 13 to keep the stream unique.
		 */
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		nbytes -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	/* Don't leave key material from the state lying on the stack. */
	memzero_explicit(chacha_state, sizeof(chacha_state));
}
5393655adc7SJason A. Donenfeld 
5403655adc7SJason A. Donenfeld /*
5413655adc7SJason A. Donenfeld  * This function is the exported kernel interface.  It returns some
5423655adc7SJason A. Donenfeld  * number of good random numbers, suitable for key generation, seeding
5433655adc7SJason A. Donenfeld  * TCP sequence numbers, etc.  It does not rely on the hardware random
5443655adc7SJason A. Donenfeld  * number generator.  For random bytes direct from the hardware RNG
5453655adc7SJason A. Donenfeld  * (when available), use get_random_bytes_arch(). In order to ensure
5463655adc7SJason A. Donenfeld  * that the randomness provided by this function is okay, the function
5473655adc7SJason A. Donenfeld  * wait_for_random_bytes() should be called and return 0 at least once
5483655adc7SJason A. Donenfeld  * at any point prior.
5493655adc7SJason A. Donenfeld  */
void get_random_bytes(void *buf, size_t nbytes)
{
	/* Opaque per-call-site state handed to warn_unseeded_randomness(). */
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
5583655adc7SJason A. Donenfeld 
/*
 * Copy @nbytes of random data to userspace at @buf, using the same fast key
 * erasure scheme as _get_random_bytes(). Returns the number of bytes copied,
 * which may be short if a signal arrives mid-request, or -EFAULT on a bad
 * user pointer.
 */
static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
	bool large_request = nbytes > 256;
	ssize_t ret = 0;
	size_t len;
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 output[CHACHA_BLOCK_SIZE];

	if (!nbytes)
		return 0;

	/* First <= 32 bytes come straight from fast key erasure. */
	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, output, len);

	if (copy_to_user(buf, output, len))
		return -EFAULT;
	nbytes -= len;
	buf += len;
	ret += len;

	while (nbytes) {
		/*
		 * For requests over 256 bytes, yield the CPU when resched is
		 * needed, and return a short read if a signal is pending.
		 */
		if (large_request && need_resched()) {
			if (signal_pending(current))
				break;
			schedule();
		}

		chacha20_block(chacha_state, output);
		/* If the block counter (word 12) wrapped to 0, carry into word 13. */
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, output, len)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= len;
		buf += len;
		ret += len;
	}

	/* Wipe the key material and last generated block from the stack. */
	memzero_explicit(chacha_state, sizeof(chacha_state));
	memzero_explicit(output, sizeof(output));
	return ret;
}
6053655adc7SJason A. Donenfeld 
6063655adc7SJason A. Donenfeld /*
6073655adc7SJason A. Donenfeld  * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the randomness
6093655adc7SJason A. Donenfeld  * provided by this function is okay, the function wait_for_random_bytes()
6103655adc7SJason A. Donenfeld  * should be called and return 0 at least once at any point prior.
6113655adc7SJason A. Donenfeld  */
/* Per-CPU buffer of pre-generated random words, refilled from the crng. */
struct batched_entropy {
	union {
		/*
		 * We make this 1.5x a ChaCha block, so that we get the
		 * remaining 32 bytes from fast key erasure, plus one full
		 * block from the detached ChaCha state. We can increase
		 * the size of this later if needed so long as we keep the
		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
		 */
		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
	};
	local_lock_t lock;		/* guards this CPU's batch */
	unsigned long generation;	/* base_crng.generation at last refill */
	unsigned int position;		/* index of next unused word in the batch */
};
6283655adc7SJason A. Donenfeld 
6293655adc7SJason A. Donenfeld 
/* position = UINT_MAX forces a refill on first use. */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
	.position = UINT_MAX
};
6343655adc7SJason A. Donenfeld 
u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;	/* opaque state for warn_unseeded_randomness() */
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u64.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u64);

	/* Refill when the batch is spent or the crng has been reseeded since. */
	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
		batch->position = 0;
		batch->generation = next_gen;
	}

	ret = batch->entropy_u64[batch->position];
	/* Erase each word after handing it out. */
	batch->entropy_u64[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);
6633655adc7SJason A. Donenfeld 
/* position = UINT_MAX forces a refill on first use. */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
	.position = UINT_MAX
};
6683655adc7SJason A. Donenfeld 
u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;	/* opaque state for warn_unseeded_randomness() */
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u32.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u32);

	/* Refill when the batch is spent or the crng has been reseeded since. */
	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
		batch->position = 0;
		batch->generation = next_gen;
	}

	ret = batch->entropy_u32[batch->position];
	/* Erase each word after handing it out. */
	batch->entropy_u32[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
6973655adc7SJason A. Donenfeld 
6983655adc7SJason A. Donenfeld /**
6993655adc7SJason A. Donenfeld  * randomize_page - Generate a random, page aligned address
7003655adc7SJason A. Donenfeld  * @start:	The smallest acceptable address the caller will take.
7013655adc7SJason A. Donenfeld  * @range:	The size of the area, starting at @start, within which the
7023655adc7SJason A. Donenfeld  *		random address must fall.
7033655adc7SJason A. Donenfeld  *
7043655adc7SJason A. Donenfeld  * If @start + @range would overflow, @range is capped.
7053655adc7SJason A. Donenfeld  *
7063655adc7SJason A. Donenfeld  * NOTE: Historical use of randomize_range, which this replaces, presumed that
7073655adc7SJason A. Donenfeld  * @start was already page aligned.  We now align it regardless.
7083655adc7SJason A. Donenfeld  *
7093655adc7SJason A. Donenfeld  * Return: A page aligned address within [start, start + range).  On error,
7103655adc7SJason A. Donenfeld  * @start is returned.
7113655adc7SJason A. Donenfeld  */
7123655adc7SJason A. Donenfeld unsigned long randomize_page(unsigned long start, unsigned long range)
7133655adc7SJason A. Donenfeld {
7143655adc7SJason A. Donenfeld 	if (!PAGE_ALIGNED(start)) {
7153655adc7SJason A. Donenfeld 		range -= PAGE_ALIGN(start) - start;
7163655adc7SJason A. Donenfeld 		start = PAGE_ALIGN(start);
7173655adc7SJason A. Donenfeld 	}
7183655adc7SJason A. Donenfeld 
7193655adc7SJason A. Donenfeld 	if (start > ULONG_MAX - range)
7203655adc7SJason A. Donenfeld 		range = ULONG_MAX - start;
7213655adc7SJason A. Donenfeld 
7223655adc7SJason A. Donenfeld 	range >>= PAGE_SHIFT;
7233655adc7SJason A. Donenfeld 
7243655adc7SJason A. Donenfeld 	if (range == 0)
7253655adc7SJason A. Donenfeld 		return start;
7263655adc7SJason A. Donenfeld 
7273655adc7SJason A. Donenfeld 	return start + (get_random_long() % range << PAGE_SHIFT);
7283655adc7SJason A. Donenfeld }
7293655adc7SJason A. Donenfeld 
7303655adc7SJason A. Donenfeld /*
7313655adc7SJason A. Donenfeld  * This function will use the architecture-specific hardware random
7323655adc7SJason A. Donenfeld  * number generator if it is available. It is not recommended for
7333655adc7SJason A. Donenfeld  * use. Use get_random_bytes() instead. It returns the number of
7343655adc7SJason A. Donenfeld  * bytes filled in.
7353655adc7SJason A. Donenfeld  */
7363655adc7SJason A. Donenfeld size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
7373655adc7SJason A. Donenfeld {
7383655adc7SJason A. Donenfeld 	size_t left = nbytes;
7393655adc7SJason A. Donenfeld 	u8 *p = buf;
7403655adc7SJason A. Donenfeld 
7413655adc7SJason A. Donenfeld 	while (left) {
7423655adc7SJason A. Donenfeld 		unsigned long v;
7433655adc7SJason A. Donenfeld 		size_t chunk = min_t(size_t, left, sizeof(unsigned long));
7443655adc7SJason A. Donenfeld 
7453655adc7SJason A. Donenfeld 		if (!arch_get_random_long(&v))
7463655adc7SJason A. Donenfeld 			break;
7473655adc7SJason A. Donenfeld 
7483655adc7SJason A. Donenfeld 		memcpy(p, &v, chunk);
7493655adc7SJason A. Donenfeld 		p += chunk;
7503655adc7SJason A. Donenfeld 		left -= chunk;
7513655adc7SJason A. Donenfeld 	}
7523655adc7SJason A. Donenfeld 
7533655adc7SJason A. Donenfeld 	return nbytes - left;
7543655adc7SJason A. Donenfeld }
7553655adc7SJason A. Donenfeld EXPORT_SYMBOL(get_random_bytes_arch);
7563655adc7SJason A. Donenfeld 
757a5ed7cb1SJason A. Donenfeld 
758a5ed7cb1SJason A. Donenfeld /**********************************************************************
759a5ed7cb1SJason A. Donenfeld  *
760a5ed7cb1SJason A. Donenfeld  * Entropy accumulation and extraction routines.
761a5ed7cb1SJason A. Donenfeld  *
762a5ed7cb1SJason A. Donenfeld  * Callers may add entropy via:
763a5ed7cb1SJason A. Donenfeld  *
764a5ed7cb1SJason A. Donenfeld  *     static void mix_pool_bytes(const void *in, size_t nbytes)
765a5ed7cb1SJason A. Donenfeld  *
766a5ed7cb1SJason A. Donenfeld  * After which, if added entropy should be credited:
767a5ed7cb1SJason A. Donenfeld  *
768a5ed7cb1SJason A. Donenfeld  *     static void credit_entropy_bits(size_t nbits)
769a5ed7cb1SJason A. Donenfeld  *
770a5ed7cb1SJason A. Donenfeld  * Finally, extract entropy via these two, with the latter one
771a5ed7cb1SJason A. Donenfeld  * setting the entropy count to zero and extracting only if there
772a5ed7cb1SJason A. Donenfeld  * is POOL_MIN_BITS entropy credited prior:
773a5ed7cb1SJason A. Donenfeld  *
774a5ed7cb1SJason A. Donenfeld  *     static void extract_entropy(void *buf, size_t nbytes)
775a5ed7cb1SJason A. Donenfeld  *     static bool drain_entropy(void *buf, size_t nbytes)
776a5ed7cb1SJason A. Donenfeld  *
777a5ed7cb1SJason A. Donenfeld  **********************************************************************/
778a5ed7cb1SJason A. Donenfeld 
enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,	/* credit capacity: full hash width */
	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
};
7831da177e4SLinus Torvalds 
784a5ed7cb1SJason A. Donenfeld /* For notifying userspace should write into /dev/random. */
785a11e1d43SLinus Torvalds static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
7861da177e4SLinus Torvalds 
static struct {
	struct blake2s_state hash;	/* running BLAKE2s over all mixed-in data */
	spinlock_t lock;		/* serializes updates of the hash state */
	unsigned int entropy_count;	/* credited bits; updated via cmpxchg */
} input_pool = {
	/* Static BLAKE2s init: IV0 XORed with the standard parameter block. */
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
7981da177e4SLinus Torvalds 
/*
 * Mix data into the pool hash. The caller must hold input_pool.lock, or be
 * running before concurrency is possible (as in rand_initialize()).
 */
static void _mix_pool_bytes(const void *in, size_t nbytes)
{
	blake2s_update(&input_pool.hash, in, nbytes);
}
80390ed1e67SJason A. Donenfeld 
/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *in, size_t nbytes)
{
	unsigned long flags;

	/* Serialize against all other writers of the pool hash. */
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(in, nbytes);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
8171da177e4SLinus Torvalds 
/* Credit @nbits of entropy to the pool, saturating at POOL_BITS. */
static void credit_entropy_bits(size_t nbits)
{
	unsigned int entropy_count, orig, add;

	if (!nbits)
		return;

	add = min_t(size_t, nbits, POOL_BITS);

	/*
	 * Lockless saturating add: retry the cmpxchg until no other writer
	 * has raced in between our read and our update.
	 */
	do {
		orig = READ_ONCE(input_pool.entropy_count);
		entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);

	/* Once enough credit has accumulated, seed the crng for real. */
	if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
		crng_reseed();
}
835a5ed7cb1SJason A. Donenfeld 
836a5ed7cb1SJason A. Donenfeld /*
837a5ed7cb1SJason A. Donenfeld  * This is an HKDF-like construction for using the hashed collected entropy
838a5ed7cb1SJason A. Donenfeld  * as a PRF key, that's then expanded block-by-block.
839a5ed7cb1SJason A. Donenfeld  */
static void extract_entropy(void *buf, size_t nbytes)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	/*
	 * Gather 32 bytes of hardware randomness (falling back to the cycle
	 * counter) that get folded into every expanded output block below.
	 */
	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	/* Re-key the pool so this extraction cannot be recomputed later. */
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	/* Expand @seed into @nbytes of output, one hash-sized block at a time. */
	while (nbytes) {
		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		nbytes -= i;
		buf += i;
	}

	/* Wipe key material from the stack. */
	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}
881a5ed7cb1SJason A. Donenfeld 
/*
 * First we make sure we have POOL_MIN_BITS of entropy in the pool, and then we
 * set the entropy count to zero (but don't actually touch any data). Only then
 * can we extract a new key with extract_entropy().
 */
static bool drain_entropy(void *buf, size_t nbytes)
{
	unsigned int entropy_count;
	/* Atomically claim the entire credit, or fail with no side effects. */
	do {
		entropy_count = READ_ONCE(input_pool.entropy_count);
		if (entropy_count < POOL_MIN_BITS)
			return false;
	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
	extract_entropy(buf, nbytes);
	/* The pool credit is empty again: tell /dev/random writers. */
	wake_up_interruptible(&random_write_wait);
	kill_fasync(&fasync, SIGIO, POLL_OUT);
	return true;
}
900a5ed7cb1SJason A. Donenfeld 
90192c653cfSJason A. Donenfeld 
90292c653cfSJason A. Donenfeld /**********************************************************************
90392c653cfSJason A. Donenfeld  *
90492c653cfSJason A. Donenfeld  * Entropy collection routines.
90592c653cfSJason A. Donenfeld  *
90692c653cfSJason A. Donenfeld  * The following exported functions are used for pushing entropy into
90792c653cfSJason A. Donenfeld  * the above entropy accumulation routines:
90892c653cfSJason A. Donenfeld  *
90992c653cfSJason A. Donenfeld  *	void add_device_randomness(const void *buf, size_t size);
91092c653cfSJason A. Donenfeld  *	void add_input_randomness(unsigned int type, unsigned int code,
91192c653cfSJason A. Donenfeld  *	                          unsigned int value);
91292c653cfSJason A. Donenfeld  *	void add_disk_randomness(struct gendisk *disk);
91392c653cfSJason A. Donenfeld  *	void add_hwgenerator_randomness(const void *buffer, size_t count,
91492c653cfSJason A. Donenfeld  *					size_t entropy);
91592c653cfSJason A. Donenfeld  *	void add_bootloader_randomness(const void *buf, size_t size);
91692c653cfSJason A. Donenfeld  *	void add_interrupt_randomness(int irq);
91792c653cfSJason A. Donenfeld  *
91892c653cfSJason A. Donenfeld  * add_device_randomness() adds data to the input pool that
91992c653cfSJason A. Donenfeld  * is likely to differ between two devices (or possibly even per boot).
92092c653cfSJason A. Donenfeld  * This would be things like MAC addresses or serial numbers, or the
92192c653cfSJason A. Donenfeld  * read-out of the RTC. This does *not* credit any actual entropy to
92292c653cfSJason A. Donenfeld  * the pool, but it initializes the pool to different values for devices
92392c653cfSJason A. Donenfeld  * that might otherwise be identical and have very little entropy
92492c653cfSJason A. Donenfeld  * available to them (particularly common in the embedded world).
92592c653cfSJason A. Donenfeld  *
92692c653cfSJason A. Donenfeld  * add_input_randomness() uses the input layer interrupt timing, as well
92792c653cfSJason A. Donenfeld  * as the event type information from the hardware.
92892c653cfSJason A. Donenfeld  *
92992c653cfSJason A. Donenfeld  * add_disk_randomness() uses what amounts to the seek time of block
93092c653cfSJason A. Donenfeld  * layer request events, on a per-disk_devt basis, as input to the
93192c653cfSJason A. Donenfeld  * entropy pool. Note that high-speed solid state drives with very low
93292c653cfSJason A. Donenfeld  * seek times do not make for good sources of entropy, as their seek
93392c653cfSJason A. Donenfeld  * times are usually fairly consistent.
93492c653cfSJason A. Donenfeld  *
93592c653cfSJason A. Donenfeld  * The above two routines try to estimate how many bits of entropy
93692c653cfSJason A. Donenfeld  * to credit. They do this by keeping track of the first and second
93792c653cfSJason A. Donenfeld  * order deltas of the event timings.
93892c653cfSJason A. Donenfeld  *
93992c653cfSJason A. Donenfeld  * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
94092c653cfSJason A. Donenfeld  * entropy as specified by the caller. If the entropy pool is full it will
94192c653cfSJason A. Donenfeld  * block until more entropy is needed.
94292c653cfSJason A. Donenfeld  *
94392c653cfSJason A. Donenfeld  * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
94492c653cfSJason A. Donenfeld  * add_device_randomness(), depending on whether or not the configuration
94592c653cfSJason A. Donenfeld  * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
94692c653cfSJason A. Donenfeld  *
94792c653cfSJason A. Donenfeld  * add_interrupt_randomness() uses the interrupt timing as random
94892c653cfSJason A. Donenfeld  * inputs to the entropy pool. Using the cycle counters and the irq source
94992c653cfSJason A. Donenfeld  * as inputs, it feeds the input pool roughly once a second or after 64
95092c653cfSJason A. Donenfeld  * interrupts, crediting 1 bit of entropy for whichever comes first.
95192c653cfSJason A. Donenfeld  *
95292c653cfSJason A. Donenfeld  **********************************************************************/
95392c653cfSJason A. Donenfeld 
/* Kernel parameter random.trust_cpu overrides the Kconfig default at boot. */
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);
960775f4b29STheodore Ts'o 
961775f4b29STheodore Ts'o /*
96292c653cfSJason A. Donenfeld  * The first collection of entropy occurs at system boot while interrupts
96392c653cfSJason A. Donenfeld  * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
96492c653cfSJason A. Donenfeld  * Depending on the above configuration knob, RDSEED may be considered
96592c653cfSJason A. Donenfeld  * sufficient for initialization. Note that much earlier setup may already
96692c653cfSJason A. Donenfeld  * have pushed entropy into the input pool by the time we get here.
967775f4b29STheodore Ts'o  */
int __init rand_initialize(void)
{
	size_t i;
	ktime_t now = ktime_get_real();
	bool arch_init = true;
	unsigned long rv;

	/*
	 * Mix in one hash block's worth of arch randomness, falling back to
	 * the cycle counter per word and remembering whether any fallback
	 * happened (arch_init).
	 */
	for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		_mix_pool_bytes(&rv, sizeof(rv));
	}
	/* Wall-clock time and utsname() differ across machines and boots. */
	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(utsname(), sizeof(*(utsname())));

	/* Give the base crng its first key from whatever was mixed above. */
	extract_entropy(base_crng.key, sizeof(base_crng.key));
	++base_crng.generation;

	/* If every word came from the arch RNG and the CPU is trusted, done. */
	if (arch_init && trust_cpu && crng_init < 2) {
		crng_init = 2;
		pr_notice("crng init done (trusting CPU's manufacturer)\n");
	}

	/* random.ratelimit_disable silences both warning ratelimits. */
	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}
10001da177e4SLinus Torvalds 
/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;		/* jiffies of the previous event */
	long last_delta, last_delta2;	/* first- and second-order deltas */
};
10061da177e4SLinus Torvalds 
1007a2080a67SLinus Torvalds /*
1008e192be9dSTheodore Ts'o  * Add device- or boot-specific data to the input pool to help
1009e192be9dSTheodore Ts'o  * initialize it.
1010a2080a67SLinus Torvalds  *
1011e192be9dSTheodore Ts'o  * None of this adds any entropy; it is meant to avoid the problem of
1012e192be9dSTheodore Ts'o  * the entropy pool having similar initial state across largely
1013e192be9dSTheodore Ts'o  * identical devices.
1014a2080a67SLinus Torvalds  */
void add_device_randomness(const void *buf, size_t size)
{
	/* Fold in a timestamp too, so identical data still differs per call. */
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	/* Before the crng is ready, also dribble the data into its base key. */
	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, size);
	_mix_pool_bytes(&time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
1029a2080a67SLinus Torvalds 
10301da177e4SLinus Torvalds /*
10311da177e4SLinus Torvalds  * This function adds entropy to the entropy "pool" by using timing
10321da177e4SLinus Torvalds  * delays.  It uses the timer_rand_state structure to make an estimate
10331da177e4SLinus Torvalds  * of how many bits of entropy this call has added to the pool.
10341da177e4SLinus Torvalds  *
10351da177e4SLinus Torvalds  * The number "num" is also added to the pool - it should somehow describe
10361da177e4SLinus Torvalds  * the type of event which just happened.  This is currently 0-255 for
10371da177e4SLinus Torvalds  * keyboard scan codes, and 256 upwards for interrupts.
10381da177e4SLinus Torvalds  *
10391da177e4SLinus Torvalds  */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	struct {
		long jiffies;
		unsigned int cycles;
		unsigned int num;
	} sample;
	long delta, delta2, delta3;

	/* Mix the timestamped event into the pool unconditionally. */
	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	mix_pool_bytes(&sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, sample.jiffies);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	/* Take absolute values of the three deltas ... */
	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	/* ... and reduce delta to the minimum of the three. */
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit entropy estimate to 11 bits.
	 */
	credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
}
10861da177e4SLinus Torvalds 
1087d251575aSStephen Hemminger void add_input_randomness(unsigned int type, unsigned int code,
10881da177e4SLinus Torvalds 			  unsigned int value)
10891da177e4SLinus Torvalds {
10901da177e4SLinus Torvalds 	static unsigned char last_value;
109192c653cfSJason A. Donenfeld 	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
10921da177e4SLinus Torvalds 
109392c653cfSJason A. Donenfeld 	/* Ignore autorepeat and the like. */
10941da177e4SLinus Torvalds 	if (value == last_value)
10951da177e4SLinus Torvalds 		return;
10961da177e4SLinus Torvalds 
10971da177e4SLinus Torvalds 	last_value = value;
10981da177e4SLinus Torvalds 	add_timer_randomness(&input_timer_state,
10991da177e4SLinus Torvalds 			     (type << 4) ^ code ^ (code >> 4) ^ value);
11001da177e4SLinus Torvalds }
110180fc9f53SDmitry Torokhov EXPORT_SYMBOL_GPL(add_input_randomness);
11021da177e4SLinus Torvalds 
110392c653cfSJason A. Donenfeld #ifdef CONFIG_BLOCK
110492c653cfSJason A. Donenfeld void add_disk_randomness(struct gendisk *disk)
110592c653cfSJason A. Donenfeld {
110692c653cfSJason A. Donenfeld 	if (!disk || !disk->random)
110792c653cfSJason A. Donenfeld 		return;
110892c653cfSJason A. Donenfeld 	/* First major is 1, so we get >= 0x200 here. */
110992c653cfSJason A. Donenfeld 	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
111092c653cfSJason A. Donenfeld }
111192c653cfSJason A. Donenfeld EXPORT_SYMBOL_GPL(add_disk_randomness);
111292c653cfSJason A. Donenfeld 
111392c653cfSJason A. Donenfeld void rand_initialize_disk(struct gendisk *disk)
111492c653cfSJason A. Donenfeld {
111592c653cfSJason A. Donenfeld 	struct timer_rand_state *state;
111692c653cfSJason A. Donenfeld 
111792c653cfSJason A. Donenfeld 	/*
111892c653cfSJason A. Donenfeld 	 * If kzalloc returns null, we just won't use that entropy
111992c653cfSJason A. Donenfeld 	 * source.
112092c653cfSJason A. Donenfeld 	 */
112192c653cfSJason A. Donenfeld 	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
112292c653cfSJason A. Donenfeld 	if (state) {
112392c653cfSJason A. Donenfeld 		state->last_time = INITIAL_JIFFIES;
112492c653cfSJason A. Donenfeld 		disk->random = state;
112592c653cfSJason A. Donenfeld 	}
112692c653cfSJason A. Donenfeld }
112792c653cfSJason A. Donenfeld #endif
112892c653cfSJason A. Donenfeld 
/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buffer, size_t count,
				size_t entropy)
{
	if (unlikely(crng_init == 0)) {
		/*
		 * Before the crng is seeded at all, feed bytes straight to
		 * crng_fast_load(); whatever it consumed is also mixed into
		 * the input pool, and we fall through with the remainder.
		 */
		size_t ret = crng_fast_load(buffer, count);
		mix_pool_bytes(buffer, ret);
		count -= ret;
		buffer += ret;
		if (!count || crng_init == 0)
			return;
	}

	/*
	 * Throttle writing if we're above the trickle threshold.
	 * We'll be woken up again once below POOL_MIN_BITS, when
	 * the calling thread is about to terminate, or once
	 * CRNG_RESEED_INTERVAL has elapsed.
	 */
	wait_event_interruptible_timeout(random_write_wait,
			!system_wq || kthread_should_stop() ||
			input_pool.entropy_count < POOL_MIN_BITS,
			CRNG_RESEED_INTERVAL);
	mix_pool_bytes(buffer, count);
	credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
116092c653cfSJason A. Donenfeld 
116192c653cfSJason A. Donenfeld /*
116292c653cfSJason A. Donenfeld  * Handle random seed passed by bootloader.
116392c653cfSJason A. Donenfeld  * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
116492c653cfSJason A. Donenfeld  * it would be regarded as device data.
116592c653cfSJason A. Donenfeld  * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
116692c653cfSJason A. Donenfeld  */
116792c653cfSJason A. Donenfeld void add_bootloader_randomness(const void *buf, size_t size)
116892c653cfSJason A. Donenfeld {
116992c653cfSJason A. Donenfeld 	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
117092c653cfSJason A. Donenfeld 		add_hwgenerator_randomness(buf, size, size * 8);
117192c653cfSJason A. Donenfeld 	else
117292c653cfSJason A. Donenfeld 		add_device_randomness(buf, size);
117392c653cfSJason A. Donenfeld }
117492c653cfSJason A. Donenfeld EXPORT_SYMBOL_GPL(add_bootloader_randomness);
117592c653cfSJason A. Donenfeld 
/* Per-CPU 128-bit pool fed from the interrupt path. */
struct fast_pool {
	union {
		u32 pool32[4];	/* the pool viewed as four 32-bit words */
		u64 pool64[2];	/* ... or as two 64-bit words */
	};
	struct work_struct mix;	/* dumps the pool into the input pool */
	unsigned long last;	/* jiffies at the last extraction */
	atomic_t count;		/* events accumulated; top bit flags queued work */
	u16 reg_idx;		/* rotating pt_regs word index for get_reg() */
};
118692c653cfSJason A. Donenfeld 
118792c653cfSJason A. Donenfeld /*
118892c653cfSJason A. Donenfeld  * This is a fast mixing routine used by the interrupt randomness
118992c653cfSJason A. Donenfeld  * collector. It's hardcoded for an 128 bit pool and assumes that any
119092c653cfSJason A. Donenfeld  * locks that might be needed are taken by the caller.
119192c653cfSJason A. Donenfeld  */
119292c653cfSJason A. Donenfeld static void fast_mix(u32 pool[4])
119392c653cfSJason A. Donenfeld {
119492c653cfSJason A. Donenfeld 	u32 a = pool[0],	b = pool[1];
119592c653cfSJason A. Donenfeld 	u32 c = pool[2],	d = pool[3];
119692c653cfSJason A. Donenfeld 
119792c653cfSJason A. Donenfeld 	a += b;			c += d;
119892c653cfSJason A. Donenfeld 	b = rol32(b, 6);	d = rol32(d, 27);
119992c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
120092c653cfSJason A. Donenfeld 
120192c653cfSJason A. Donenfeld 	a += b;			c += d;
120292c653cfSJason A. Donenfeld 	b = rol32(b, 16);	d = rol32(d, 14);
120392c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
120492c653cfSJason A. Donenfeld 
120592c653cfSJason A. Donenfeld 	a += b;			c += d;
120692c653cfSJason A. Donenfeld 	b = rol32(b, 6);	d = rol32(d, 27);
120792c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
120892c653cfSJason A. Donenfeld 
120992c653cfSJason A. Donenfeld 	a += b;			c += d;
121092c653cfSJason A. Donenfeld 	b = rol32(b, 16);	d = rol32(d, 14);
121192c653cfSJason A. Donenfeld 	d ^= a;			b ^= c;
121292c653cfSJason A. Donenfeld 
121392c653cfSJason A. Donenfeld 	pool[0] = a;  pool[1] = b;
121492c653cfSJason A. Donenfeld 	pool[2] = c;  pool[3] = d;
121592c653cfSJason A. Donenfeld }
121692c653cfSJason A. Donenfeld 
/* Per-CPU interrupt entropy pool, filled by add_interrupt_randomness(). */
static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
1218775f4b29STheodore Ts'o 
/*
 * Return one 32-bit word from the saved interrupt register file,
 * cycling through the registers on successive calls. Used as a
 * fallback input when no cycle counter is available. Returns 0 when
 * regs is NULL.
 */
static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	u32 *ptr = (u32 *)regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	/*
	 * reg_idx is not locked; READ_ONCE/WRITE_ONCE keep the accesses
	 * tear-free, and a racy lost update is harmless here.
	 */
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}
1233ee3e00e9STheodore Ts'o 
/*
 * Deferred (workqueue) half of add_interrupt_randomness(): dump this
 * CPU's fast_pool into the input pool and credit a single bit of
 * entropy for the accumulated interrupt timings.
 */
static void mix_interrupt_randomness(struct work_struct *work)
{
	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
	u32 pool[4];

	/* Check to see if we're running on the wrong CPU due to hotplug. */
	local_irq_disable();
	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
		local_irq_enable();
		/*
		 * If we are unlucky enough to have been moved to another CPU,
		 * during CPU hotplug while the CPU was shutdown then we set
		 * our count to zero atomically so that when the CPU comes
		 * back online, it can enqueue work again. The _release here
		 * pairs with the atomic_inc_return_acquire in
		 * add_interrupt_randomness().
		 */
		atomic_set_release(&fast_pool->count, 0);
		return;
	}

	/*
	 * Copy the pool to the stack so that the mixer always has a
	 * consistent view, before we reenable irqs again.
	 */
	memcpy(pool, fast_pool->pool32, sizeof(pool));
	atomic_set(&fast_pool->count, 0);
	fast_pool->last = jiffies;
	local_irq_enable();

	mix_pool_bytes(pool, sizeof(pool));
	credit_entropy_bits(1);
	/* Wipe the stack copy of the (still secret) pool contents. */
	memzero_explicit(pool, sizeof(pool));
}
126858340f8eSJason A. Donenfeld 
/*
 * Called from the IRQ path: fold the cycle counter, jiffies, IRQ number
 * and interrupted instruction pointer into this CPU's fast_pool, then
 * periodically hand the pool off to mix_interrupt_randomness() via the
 * high-priority workqueue.
 */
void add_interrupt_randomness(int irq)
{
	enum { MIX_INFLIGHT = 1U << 31 };	/* top bit of count: work already queued */
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	unsigned int new_count;

	/* No cycle counter: fall back to a saved CPU register as input. */
	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);

	/* XOR timing data into the first half of the 128-bit pool... */
	if (sizeof(cycles) == 8)
		fast_pool->pool64[0] ^= cycles ^ rol64(now, 32) ^ irq;
	else {
		fast_pool->pool32[0] ^= cycles ^ irq;
		fast_pool->pool32[1] ^= now;
	}

	/* ... and the instruction pointer into the second half. */
	if (sizeof(unsigned long) == 8)
		fast_pool->pool64[1] ^= regs ? instruction_pointer(regs) : _RET_IP_;
	else {
		fast_pool->pool32[2] ^= regs ? instruction_pointer(regs) : _RET_IP_;
		fast_pool->pool32[3] ^= get_reg(fast_pool, regs);
	}

	fast_mix(fast_pool->pool32);
	/* The _acquire here pairs with the atomic_set_release in mix_interrupt_randomness(). */
	new_count = (unsigned int)atomic_inc_return_acquire(&fast_pool->count);

	if (unlikely(crng_init == 0)) {
		/*
		 * Pre-seeding phase: once 64 events have accumulated, feed
		 * the crng directly and opportunistically mix into the
		 * input pool if its lock is uncontended.
		 */
		if (new_count >= 64 &&
		    crng_fast_load(fast_pool->pool32, sizeof(fast_pool->pool32)) > 0) {
			atomic_set(&fast_pool->count, 0);
			fast_pool->last = now;
			if (spin_trylock(&input_pool.lock)) {
				_mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32));
				spin_unlock(&input_pool.lock);
			}
		}
		return;
	}

	/* Extraction already scheduled on this CPU. */
	if (new_count & MIX_INFLIGHT)
		return;

	/* Only extract once per 64 events or once per second. */
	if (new_count < 64 && !time_after(now, fast_pool->last + HZ))
		return;

	if (unlikely(!fast_pool->mix.func))
		INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
	atomic_or(MIX_INFLIGHT, &fast_pool->count);
	queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
13241da177e4SLinus Torvalds 
/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy..
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
	/* Each (unpredictably timed) firing is worth one bit of credit. */
	credit_entropy_bits(1);
}
134250ee7529SLinus Torvalds 
/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise
 */
static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		/*
		 * Keep a one-jiffy timer pending; entropy_timer() credits
		 * one bit per firing while we mix in counter samples.
		 */
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	/* Mix in one last counter sample taken after the timer is gone. */
	mix_pool_bytes(&stack.now, sizeof(stack.now));
}
137350ee7529SLinus Torvalds 
1374a6adf8e7SJason A. Donenfeld 
1375a6adf8e7SJason A. Donenfeld /**********************************************************************
1376a6adf8e7SJason A. Donenfeld  *
1377a6adf8e7SJason A. Donenfeld  * Userspace reader/writer interfaces.
1378a6adf8e7SJason A. Donenfeld  *
1379a6adf8e7SJason A. Donenfeld  * getrandom(2) is the primary modern interface into the RNG and should
1380a6adf8e7SJason A. Donenfeld  * be used in preference to anything else.
1381a6adf8e7SJason A. Donenfeld  *
1382a6adf8e7SJason A. Donenfeld  * Reading from /dev/random has the same functionality as calling
1383a6adf8e7SJason A. Donenfeld  * getrandom(2) with flags=0. In earlier versions, however, it had
1384a6adf8e7SJason A. Donenfeld  * vastly different semantics and should therefore be avoided, to
1385a6adf8e7SJason A. Donenfeld  * prevent backwards compatibility issues.
1386a6adf8e7SJason A. Donenfeld  *
1387a6adf8e7SJason A. Donenfeld  * Reading from /dev/urandom has the same functionality as calling
1388a6adf8e7SJason A. Donenfeld  * getrandom(2) with flags=GRND_INSECURE. Because it does not block
1389a6adf8e7SJason A. Donenfeld  * waiting for the RNG to be ready, it should not be used.
1390a6adf8e7SJason A. Donenfeld  *
1391a6adf8e7SJason A. Donenfeld  * Writing to either /dev/random or /dev/urandom adds entropy to
1392a6adf8e7SJason A. Donenfeld  * the input pool but does not credit it.
1393a6adf8e7SJason A. Donenfeld  *
1394a6adf8e7SJason A. Donenfeld  * Polling on /dev/random indicates when the RNG is initialized, on
1395a6adf8e7SJason A. Donenfeld  * the read side, and when it wants new entropy, on the write side.
1396a6adf8e7SJason A. Donenfeld  *
1397a6adf8e7SJason A. Donenfeld  * Both /dev/random and /dev/urandom have the same set of ioctls for
1398a6adf8e7SJason A. Donenfeld  * adding entropy, getting the entropy count, zeroing the count, and
1399a6adf8e7SJason A. Donenfeld  * reseeding the crng.
1400a6adf8e7SJason A. Donenfeld  *
1401a6adf8e7SJason A. Donenfeld  **********************************************************************/
1402a6adf8e7SJason A. Donenfeld 
/*
 * getrandom(2): fill @buf with up to @count random bytes. Unless
 * GRND_INSECURE is passed, waits for the crng to be ready (or fails
 * with -EAGAIN under GRND_NONBLOCK).
 */
SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
		flags)
{
	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	/* A single read is capped at INT_MAX bytes. */
	if (count > INT_MAX)
		count = INT_MAX;

	if (!(flags & GRND_INSECURE) && !crng_ready()) {
		int ret;

		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return get_random_bytes_user(buf, count);
}
143030c08efeSAndy Lutomirski 
1431248045b8SJason A. Donenfeld static __poll_t random_poll(struct file *file, poll_table *wait)
143289b310a2SChristoph Hellwig {
1433a11e1d43SLinus Torvalds 	__poll_t mask;
143489b310a2SChristoph Hellwig 
143530c08efeSAndy Lutomirski 	poll_wait(file, &crng_init_wait, wait);
1436a11e1d43SLinus Torvalds 	poll_wait(file, &random_write_wait, wait);
1437a11e1d43SLinus Torvalds 	mask = 0;
143830c08efeSAndy Lutomirski 	if (crng_ready())
1439a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
1440489c7fc4SJason A. Donenfeld 	if (input_pool.entropy_count < POOL_MIN_BITS)
1441a9a08845SLinus Torvalds 		mask |= EPOLLOUT | EPOLLWRNORM;
14421da177e4SLinus Torvalds 	return mask;
14431da177e4SLinus Torvalds }
14441da177e4SLinus Torvalds 
144504ec96b7SJason A. Donenfeld static int write_pool(const char __user *ubuf, size_t count)
14467f397dcdSMatt Mackall {
144704ec96b7SJason A. Donenfeld 	size_t len;
14487b5164fbSJason A. Donenfeld 	int ret = 0;
144904ec96b7SJason A. Donenfeld 	u8 block[BLAKE2S_BLOCK_SIZE];
14507f397dcdSMatt Mackall 
145104ec96b7SJason A. Donenfeld 	while (count) {
145204ec96b7SJason A. Donenfeld 		len = min(count, sizeof(block));
14537b5164fbSJason A. Donenfeld 		if (copy_from_user(block, ubuf, len)) {
14547b5164fbSJason A. Donenfeld 			ret = -EFAULT;
14557b5164fbSJason A. Donenfeld 			goto out;
14567b5164fbSJason A. Donenfeld 		}
145704ec96b7SJason A. Donenfeld 		count -= len;
145804ec96b7SJason A. Donenfeld 		ubuf += len;
145904ec96b7SJason A. Donenfeld 		mix_pool_bytes(block, len);
146091f3f1e3SMatt Mackall 		cond_resched();
14617f397dcdSMatt Mackall 	}
14627f397dcdSMatt Mackall 
14637b5164fbSJason A. Donenfeld out:
14647b5164fbSJason A. Donenfeld 	memzero_explicit(block, sizeof(block));
14657b5164fbSJason A. Donenfeld 	return ret;
14667f397dcdSMatt Mackall }
14677f397dcdSMatt Mackall 
146890b75ee5SMatt Mackall static ssize_t random_write(struct file *file, const char __user *buffer,
14691da177e4SLinus Torvalds 			    size_t count, loff_t *ppos)
14701da177e4SLinus Torvalds {
147104ec96b7SJason A. Donenfeld 	int ret;
14727f397dcdSMatt Mackall 
147390ed1e67SJason A. Donenfeld 	ret = write_pool(buffer, count);
14747f397dcdSMatt Mackall 	if (ret)
14757f397dcdSMatt Mackall 		return ret;
14767f397dcdSMatt Mackall 
14777f397dcdSMatt Mackall 	return (ssize_t)count;
14781da177e4SLinus Torvalds }
14791da177e4SLinus Torvalds 
1480a6adf8e7SJason A. Donenfeld static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
1481a6adf8e7SJason A. Donenfeld 			    loff_t *ppos)
1482a6adf8e7SJason A. Donenfeld {
1483a6adf8e7SJason A. Donenfeld 	static int maxwarn = 10;
1484a6adf8e7SJason A. Donenfeld 
1485a6adf8e7SJason A. Donenfeld 	if (!crng_ready() && maxwarn > 0) {
1486a6adf8e7SJason A. Donenfeld 		maxwarn--;
1487a6adf8e7SJason A. Donenfeld 		if (__ratelimit(&urandom_warning))
1488a6adf8e7SJason A. Donenfeld 			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
1489a6adf8e7SJason A. Donenfeld 				  current->comm, nbytes);
1490a6adf8e7SJason A. Donenfeld 	}
1491a6adf8e7SJason A. Donenfeld 
1492a6adf8e7SJason A. Donenfeld 	return get_random_bytes_user(buf, nbytes);
1493a6adf8e7SJason A. Donenfeld }
1494a6adf8e7SJason A. Donenfeld 
1495a6adf8e7SJason A. Donenfeld static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
1496a6adf8e7SJason A. Donenfeld 			   loff_t *ppos)
1497a6adf8e7SJason A. Donenfeld {
1498a6adf8e7SJason A. Donenfeld 	int ret;
1499a6adf8e7SJason A. Donenfeld 
1500a6adf8e7SJason A. Donenfeld 	ret = wait_for_random_bytes();
1501a6adf8e7SJason A. Donenfeld 	if (ret != 0)
1502a6adf8e7SJason A. Donenfeld 		return ret;
1503a6adf8e7SJason A. Donenfeld 	return get_random_bytes_user(buf, nbytes);
1504a6adf8e7SJason A. Donenfeld }
1505a6adf8e7SJason A. Donenfeld 
/*
 * ioctl handler shared by /dev/random and /dev/urandom. RNDGETENTCNT is
 * unprivileged; every state-changing command requires CAP_SYS_ADMIN.
 */
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking. */
		if (put_user(input_pool.entropy_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		/* Credit entropy without adding any data. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDADDENTROPY:
		/*
		 * Mix in user-supplied bytes and credit them; arg points
		 * to { int ent_count; int size; followed by size bytes }.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool((const char __user *)p, size);
		if (retval < 0)
			return retval;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters. We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (xchg(&input_pool.entropy_count, 0)) {
			/* Writers may make progress again; notify them. */
			wake_up_interruptible(&random_write_wait);
			kill_fasync(&fasync, SIGIO, POLL_OUT);
		}
		return 0;
	case RNDRESEEDCRNG:
		/* Force an immediate reseed of an already-seeded crng. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}
15651da177e4SLinus Torvalds 
/* fasync support, so SIGIO can be delivered (see kill_fasync() above). */
static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}
15709a6f70bbSJeff Dike 
/* File operations for /dev/random: reads block until the crng is ready. */
const struct file_operations random_fops = {
	.read = random_read,
	.write = random_write,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};
15801da177e4SLinus Torvalds 
/* File operations for /dev/urandom: reads never block (and no poll). */
const struct file_operations urandom_fops = {
	.read = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};
15891da177e4SLinus Torvalds 
15900deff3c4SJason A. Donenfeld 
15911da177e4SLinus Torvalds /********************************************************************
15921da177e4SLinus Torvalds  *
15930deff3c4SJason A. Donenfeld  * Sysctl interface.
15940deff3c4SJason A. Donenfeld  *
15950deff3c4SJason A. Donenfeld  * These are partly unused legacy knobs with dummy values to not break
15960deff3c4SJason A. Donenfeld  * userspace and partly still useful things. They are usually accessible
15970deff3c4SJason A. Donenfeld  * in /proc/sys/kernel/random/ and are as follows:
15980deff3c4SJason A. Donenfeld  *
15990deff3c4SJason A. Donenfeld  * - boot_id - a UUID representing the current boot.
16000deff3c4SJason A. Donenfeld  *
16010deff3c4SJason A. Donenfeld  * - uuid - a random UUID, different each time the file is read.
16020deff3c4SJason A. Donenfeld  *
16030deff3c4SJason A. Donenfeld  * - poolsize - the number of bits of entropy that the input pool can
16040deff3c4SJason A. Donenfeld  *   hold, tied to the POOL_BITS constant.
16050deff3c4SJason A. Donenfeld  *
16060deff3c4SJason A. Donenfeld  * - entropy_avail - the number of bits of entropy currently in the
16070deff3c4SJason A. Donenfeld  *   input pool. Always <= poolsize.
16080deff3c4SJason A. Donenfeld  *
16090deff3c4SJason A. Donenfeld  * - write_wakeup_threshold - the amount of entropy in the input pool
16100deff3c4SJason A. Donenfeld  *   below which write polls to /dev/random will unblock, requesting
16110deff3c4SJason A. Donenfeld  *   more entropy, tied to the POOL_MIN_BITS constant. It is writable
16120deff3c4SJason A. Donenfeld  *   to avoid breaking old userspaces, but writing to it does not
16130deff3c4SJason A. Donenfeld  *   change any behavior of the RNG.
16140deff3c4SJason A. Donenfeld  *
16150deff3c4SJason A. Donenfeld  * - urandom_min_reseed_secs - fixed to the meaningless value "60".
16160deff3c4SJason A. Donenfeld  *   It is writable to avoid breaking old userspaces, but writing
16170deff3c4SJason A. Donenfeld  *   to it does not change any behavior of the RNG.
16181da177e4SLinus Torvalds  *
16191da177e4SLinus Torvalds  ********************************************************************/
16201da177e4SLinus Torvalds 
16211da177e4SLinus Torvalds #ifdef CONFIG_SYSCTL
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds #include <linux/sysctl.h>
16241da177e4SLinus Torvalds 
16250deff3c4SJason A. Donenfeld static int sysctl_random_min_urandom_seed = 60;
16260deff3c4SJason A. Donenfeld static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
1627489c7fc4SJason A. Donenfeld static int sysctl_poolsize = POOL_BITS;
16281da177e4SLinus Torvalds static char sysctl_bootid[16];
16291da177e4SLinus Torvalds 
16301da177e4SLinus Torvalds /*
1631f22052b2SGreg Price  * This function is used to return both the bootid UUID, and random
16321da177e4SLinus Torvalds  * UUID.  The difference is in whether table->data is NULL; if it is,
16331da177e4SLinus Torvalds  * then a new UUID is generated and returned to the user.
16341da177e4SLinus Torvalds  *
1635f22052b2SGreg Price  * If the user accesses this via the proc interface, the UUID will be
1636f22052b2SGreg Price  * returned as an ASCII string in the standard UUID format; if via the
1637f22052b2SGreg Price  * sysctl system call, as 16 bytes of binary data.
16381da177e4SLinus Torvalds  */
1639248045b8SJason A. Donenfeld static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
1640248045b8SJason A. Donenfeld 			size_t *lenp, loff_t *ppos)
16411da177e4SLinus Torvalds {
1642a151427eSJoe Perches 	struct ctl_table fake_table;
16431da177e4SLinus Torvalds 	unsigned char buf[64], tmp_uuid[16], *uuid;
16441da177e4SLinus Torvalds 
16451da177e4SLinus Torvalds 	uuid = table->data;
16461da177e4SLinus Torvalds 	if (!uuid) {
16471da177e4SLinus Torvalds 		uuid = tmp_uuid;
16481da177e4SLinus Torvalds 		generate_random_uuid(uuid);
164944e4360fSMathieu Desnoyers 	} else {
165044e4360fSMathieu Desnoyers 		static DEFINE_SPINLOCK(bootid_spinlock);
165144e4360fSMathieu Desnoyers 
165244e4360fSMathieu Desnoyers 		spin_lock(&bootid_spinlock);
165344e4360fSMathieu Desnoyers 		if (!uuid[8])
165444e4360fSMathieu Desnoyers 			generate_random_uuid(uuid);
165544e4360fSMathieu Desnoyers 		spin_unlock(&bootid_spinlock);
165644e4360fSMathieu Desnoyers 	}
16571da177e4SLinus Torvalds 
165835900771SJoe Perches 	sprintf(buf, "%pU", uuid);
165935900771SJoe Perches 
16601da177e4SLinus Torvalds 	fake_table.data = buf;
16611da177e4SLinus Torvalds 	fake_table.maxlen = sizeof(buf);
16621da177e4SLinus Torvalds 
16638d65af78SAlexey Dobriyan 	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
16641da177e4SLinus Torvalds }
16651da177e4SLinus Torvalds 
16665475e8f0SXiaoming Ni static struct ctl_table random_table[] = {
16671da177e4SLinus Torvalds 	{
16681da177e4SLinus Torvalds 		.procname	= "poolsize",
16691da177e4SLinus Torvalds 		.data		= &sysctl_poolsize,
16701da177e4SLinus Torvalds 		.maxlen		= sizeof(int),
16711da177e4SLinus Torvalds 		.mode		= 0444,
16726d456111SEric W. Biederman 		.proc_handler	= proc_dointvec,
16731da177e4SLinus Torvalds 	},
16741da177e4SLinus Torvalds 	{
16751da177e4SLinus Torvalds 		.procname	= "entropy_avail",
1676c5704490SJason A. Donenfeld 		.data		= &input_pool.entropy_count,
16771da177e4SLinus Torvalds 		.maxlen		= sizeof(int),
16781da177e4SLinus Torvalds 		.mode		= 0444,
1679c5704490SJason A. Donenfeld 		.proc_handler	= proc_dointvec,
16801da177e4SLinus Torvalds 	},
16811da177e4SLinus Torvalds 	{
16821da177e4SLinus Torvalds 		.procname	= "write_wakeup_threshold",
16830deff3c4SJason A. Donenfeld 		.data		= &sysctl_random_write_wakeup_bits,
16841da177e4SLinus Torvalds 		.maxlen		= sizeof(int),
16851da177e4SLinus Torvalds 		.mode		= 0644,
1686489c7fc4SJason A. Donenfeld 		.proc_handler	= proc_dointvec,
16871da177e4SLinus Torvalds 	},
16881da177e4SLinus Torvalds 	{
1689f5c2742cSTheodore Ts'o 		.procname	= "urandom_min_reseed_secs",
16900deff3c4SJason A. Donenfeld 		.data		= &sysctl_random_min_urandom_seed,
1691f5c2742cSTheodore Ts'o 		.maxlen		= sizeof(int),
1692f5c2742cSTheodore Ts'o 		.mode		= 0644,
1693f5c2742cSTheodore Ts'o 		.proc_handler	= proc_dointvec,
1694f5c2742cSTheodore Ts'o 	},
1695f5c2742cSTheodore Ts'o 	{
16961da177e4SLinus Torvalds 		.procname	= "boot_id",
16971da177e4SLinus Torvalds 		.data		= &sysctl_bootid,
16981da177e4SLinus Torvalds 		.maxlen		= 16,
16991da177e4SLinus Torvalds 		.mode		= 0444,
17006d456111SEric W. Biederman 		.proc_handler	= proc_do_uuid,
17011da177e4SLinus Torvalds 	},
17021da177e4SLinus Torvalds 	{
17031da177e4SLinus Torvalds 		.procname	= "uuid",
17041da177e4SLinus Torvalds 		.maxlen		= 16,
17051da177e4SLinus Torvalds 		.mode		= 0444,
17066d456111SEric W. Biederman 		.proc_handler	= proc_do_uuid,
17071da177e4SLinus Torvalds 	},
1708894d2491SEric W. Biederman 	{ }
17091da177e4SLinus Torvalds };
17105475e8f0SXiaoming Ni 
17115475e8f0SXiaoming Ni /*
17125475e8f0SXiaoming Ni  * rand_initialize() is called before sysctl_init(),
17135475e8f0SXiaoming Ni  * so we cannot call register_sysctl_init() in rand_initialize()
17145475e8f0SXiaoming Ni  */
17155475e8f0SXiaoming Ni static int __init random_sysctls_init(void)
17165475e8f0SXiaoming Ni {
17175475e8f0SXiaoming Ni 	register_sysctl_init("kernel/random", random_table);
17185475e8f0SXiaoming Ni 	return 0;
17195475e8f0SXiaoming Ni }
17205475e8f0SXiaoming Ni device_initcall(random_sysctls_init);
17210deff3c4SJason A. Donenfeld #endif
1722