1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3  * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
4  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
5  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
6  *
7  * This driver produces cryptographically secure pseudorandom data. It is divided
8  * into six sections, each with a section header:
9  *
10  *   - Initialization and readiness waiting.
11  *   - Fast key erasure RNG, the "crng".
12  *   - Entropy accumulation and extraction routines.
13  *   - Entropy collection routines.
14  *   - Userspace reader/writer interfaces.
15  *   - Sysctl interface.
16  *
17  * The high level overview is that there is one input pool, into which
18  * various pieces of data are hashed. Prior to initialization, some of that
19  * data is then "credited" as having a certain number of bits of entropy.
20  * When enough bits of entropy are available, the hash is finalized and
21  * handed as a key to a stream cipher that expands it indefinitely for
22  * various consumers. This key is periodically refreshed as the various
23  * entropy collectors, described below, add data to the input pool.
24  */
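
/*
 * A rough sketch of that flow, as a reading aid (illustrative only,
 * not literal driver code):
 *
 *	add_*_randomness()  ->  blake2s_update(&input_pool.hash)  // mix
 *	credit_init_bits(n)                                       // account
 *	extract_entropy()   ->  BLAKE2s-based PRF  ->  new key    // finalize
 *	crng_reseed()       ->  base_crng.key                     // rekey
 *	get_random_bytes()  ->  ChaCha20 keystream                // expand
 */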
25 
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27 
28 #include <linux/utsname.h>
29 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/major.h>
32 #include <linux/string.h>
33 #include <linux/fcntl.h>
34 #include <linux/slab.h>
35 #include <linux/random.h>
36 #include <linux/poll.h>
37 #include <linux/init.h>
38 #include <linux/fs.h>
39 #include <linux/blkdev.h>
40 #include <linux/interrupt.h>
41 #include <linux/mm.h>
42 #include <linux/nodemask.h>
43 #include <linux/spinlock.h>
44 #include <linux/kthread.h>
45 #include <linux/percpu.h>
46 #include <linux/ptrace.h>
47 #include <linux/workqueue.h>
48 #include <linux/irq.h>
49 #include <linux/ratelimit.h>
50 #include <linux/syscalls.h>
51 #include <linux/completion.h>
52 #include <linux/uuid.h>
53 #include <linux/uaccess.h>
54 #include <linux/suspend.h>
55 #include <linux/siphash.h>
56 #include <crypto/chacha.h>
57 #include <crypto/blake2s.h>
58 #include <asm/processor.h>
59 #include <asm/irq.h>
60 #include <asm/irq_regs.h>
61 #include <asm/io.h>
62 
63 /*********************************************************************
64  *
65  * Initialization and readiness waiting.
66  *
67  * Much of the RNG infrastructure is devoted to various dependencies
68  * being able to wait until the RNG has collected enough entropy and
69  * is ready for safe consumption.
70  *
71  *********************************************************************/
72 
73 /*
74  * crng_init is protected by base_crng->lock, and only increases
75  * its value (from empty->early->ready).
76  */
77 static enum {
78 	CRNG_EMPTY = 0, /* Little to no entropy collected */
79 	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
80 	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
81 } crng_init __read_mostly = CRNG_EMPTY;
82 static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
83 #define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
84 /* Various types of waiters for crng_init->CRNG_READY transition. */
85 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
86 static struct fasync_struct *fasync;
87 
88 /* Control how we warn userspace. */
89 static struct ratelimit_state urandom_warning =
90 	RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
91 static int ratelimit_disable __read_mostly =
92 	IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
93 module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
94 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
95 
96 /*
97  * Returns whether or not the input pool has been seeded and thus guaranteed
98  * to supply cryptographically secure random numbers. This applies to: the
99  * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
100  * u16,u32,u64,int,long} family of functions.
101  *
102  * Returns: true if the input pool has been seeded.
103  *          false if the input pool has not been seeded.
104  */
105 bool rng_is_initialized(void)
106 {
107 	return crng_ready();
108 }
109 EXPORT_SYMBOL(rng_is_initialized);
110 
111 static void __cold crng_set_ready(struct work_struct *work)
112 {
113 	static_branch_enable(&crng_is_ready);
114 }
115 
116 /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
117 static void try_to_generate_entropy(void);
118 
119 /*
120  * Wait for the input pool to be seeded and thus guaranteed to supply
121  * cryptographically secure random numbers. This applies to: the /dev/urandom
122  * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
123  * int,long} family of functions. Using any of these functions without first
124  * calling this function forfeits the guarantee of security.
125  *
126  * Returns: 0 if the input pool has been seeded.
127  *          -ERESTARTSYS if the function was interrupted by a signal.
128  */
129 int wait_for_random_bytes(void)
130 {
131 	while (!crng_ready()) {
132 		int ret;
133 
134 		try_to_generate_entropy();
135 		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
136 		if (ret)
137 			return ret > 0 ? 0 : ret;
138 	}
139 	return 0;
140 }
141 EXPORT_SYMBOL(wait_for_random_bytes);
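
/*
 * A minimal usage sketch for in-kernel consumers (illustrative; the
 * key buffer here is hypothetical):
 *
 *	u8 key[32];
 *	int ret = wait_for_random_bytes();
 *
 *	if (ret)
 *		return ret;	// interrupted by a signal
 *	get_random_bytes(key, sizeof(key));
 */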
142 
143 #define warn_unseeded_randomness() \
144 	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
145 		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
146 				__func__, (void *)_RET_IP_, crng_init)
147 
148 
149 /*********************************************************************
150  *
151  * Fast key erasure RNG, the "crng".
152  *
153  * These functions expand entropy from the entropy extractor into
154  * long streams for external consumption using the "fast key erasure"
155  * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
156  *
157  * There are a few exported interfaces for use by other drivers:
158  *
159  *	void get_random_bytes(void *buf, size_t len)
160  *	u8 get_random_u8()
161  *	u16 get_random_u16()
162  *	u32 get_random_u32()
163  *	u64 get_random_u64()
164  *	unsigned int get_random_int()
165  *	unsigned long get_random_long()
166  *
167  * These interfaces will return the requested number of random bytes
168  * into the given buffer or as a return value. This is equivalent to
169  * a read from /dev/urandom. The u8, u16, u32, u64, int, and long
170  * family of functions may offer higher performance for one-off random
171  * integers, because they do a bit of buffering and do not invoke
172  * reseeding until the buffer is emptied.
173  *
174  *********************************************************************/
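
/*
 * As a sketch, one fast key erasure step in the scheme cited above
 * looks like this (not literal driver code):
 *
 *	block = ChaCha20(key, counter = 0);	// one 64-byte block
 *	key   = block[0..31];			// old key is overwritten
 *	out   = block[32..63];			// up to 32 bytes for callers
 */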
175 
176 enum {
177 	CRNG_RESEED_START_INTERVAL = HZ,
178 	CRNG_RESEED_INTERVAL = 60 * HZ
179 };
180 
181 static struct {
182 	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
183 	unsigned long birth;
184 	unsigned long generation;
185 	spinlock_t lock;
186 } base_crng = {
187 	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
188 };
189 
190 struct crng {
191 	u8 key[CHACHA_KEY_SIZE];
192 	unsigned long generation;
193 	local_lock_t lock;
194 };
195 
196 static DEFINE_PER_CPU(struct crng, crngs) = {
197 	.generation = ULONG_MAX,
198 	.lock = INIT_LOCAL_LOCK(crngs.lock),
199 };
200 
201 /* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
202 static void extract_entropy(void *buf, size_t len);
203 
204 /* This extracts a new crng key from the input pool. */
205 static void crng_reseed(void)
206 {
207 	unsigned long flags;
208 	unsigned long next_gen;
209 	u8 key[CHACHA_KEY_SIZE];
210 
211 	extract_entropy(key, sizeof(key));
212 
213 	/*
214 	 * We copy the new key into the base_crng, overwriting the old one,
215 	 * and update the generation counter. We avoid hitting ULONG_MAX,
216 	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
217 	 * forces new CPUs that come online to always initialize.
218 	 */
219 	spin_lock_irqsave(&base_crng.lock, flags);
220 	memcpy(base_crng.key, key, sizeof(base_crng.key));
221 	next_gen = base_crng.generation + 1;
222 	if (next_gen == ULONG_MAX)
223 		++next_gen;
224 	WRITE_ONCE(base_crng.generation, next_gen);
225 	WRITE_ONCE(base_crng.birth, jiffies);
226 	if (!static_branch_likely(&crng_is_ready))
227 		crng_init = CRNG_READY;
228 	spin_unlock_irqrestore(&base_crng.lock, flags);
229 	memzero_explicit(key, sizeof(key));
230 }
231 
232 /*
233  * This generates a ChaCha block using the provided key, and then
234  * immediately overwrites that key with half the block. It returns
235  * the resultant ChaCha state to the user, along with the second
236  * half of the block containing 32 bytes of random data that may
237  * be used; random_data_len may not be greater than 32.
238  *
239  * The returned ChaCha state contains within it a copy of the old
240  * key value, at index 4, so the state should always be zeroed out
241  * immediately after using in order to maintain forward secrecy.
242  * If the state cannot be erased in a timely manner, then it is
243  * safer to set the random_data parameter to &chacha_state[4] so
244  * that this function overwrites it before returning.
245  */
246 static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
247 				  u32 chacha_state[CHACHA_STATE_WORDS],
248 				  u8 *random_data, size_t random_data_len)
249 {
250 	u8 first_block[CHACHA_BLOCK_SIZE];
251 
252 	BUG_ON(random_data_len > 32);
253 
254 	chacha_init_consts(chacha_state);
255 	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
256 	memset(&chacha_state[12], 0, sizeof(u32) * 4);
257 	chacha20_block(chacha_state, first_block);
258 
259 	memcpy(key, first_block, CHACHA_KEY_SIZE);
260 	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
261 	memzero_explicit(first_block, sizeof(first_block));
262 }
263 
264 /*
265  * Return the interval until the next reseeding, which is normally
266  * CRNG_RESEED_INTERVAL, but during early boot is instead proportional
267  * to the uptime.
268  */
269 static unsigned int crng_reseed_interval(void)
270 {
271 	static bool early_boot = true;
272 
273 	if (unlikely(READ_ONCE(early_boot))) {
274 		time64_t uptime = ktime_get_seconds();
275 		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
276 			WRITE_ONCE(early_boot, false);
277 		else
278 			return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
279 				     (unsigned int)uptime / 2 * HZ);
280 	}
281 	return CRNG_RESEED_INTERVAL;
282 }
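
/*
 * Worked example of the schedule above: at 10 seconds of uptime the
 * interval is max(HZ, 10 / 2 * HZ) = 5 * HZ; once uptime reaches
 * 2 * 60 = 120 seconds, it settles at CRNG_RESEED_INTERVAL = 60 * HZ.
 */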
283 
284 /*
285  * This function returns a ChaCha state that you may use for generating
286  * random data. It also returns up to 32 bytes on its own of random data
287  * that may be used; random_data_len may not be greater than 32.
288  */
289 static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
290 			    u8 *random_data, size_t random_data_len)
291 {
292 	unsigned long flags;
293 	struct crng *crng;
294 
295 	BUG_ON(random_data_len > 32);
296 
297 	/*
298 	 * For the fast path, we first check whether we're ready without the
299 	 * lock, and then re-check after taking it. If we're really not ready,
300 	 * we do fast key erasure with the base_crng directly, extracting from
301 	 * the input pool when crng_init is CRNG_EMPTY.
302 	 */
303 	if (!crng_ready()) {
304 		bool ready;
305 
306 		spin_lock_irqsave(&base_crng.lock, flags);
307 		ready = crng_ready();
308 		if (!ready) {
309 			if (crng_init == CRNG_EMPTY)
310 				extract_entropy(base_crng.key, sizeof(base_crng.key));
311 			crng_fast_key_erasure(base_crng.key, chacha_state,
312 					      random_data, random_data_len);
313 		}
314 		spin_unlock_irqrestore(&base_crng.lock, flags);
315 		if (!ready)
316 			return;
317 	}
318 
319 	/*
320 	 * If the base_crng is old enough, we reseed, which in turn bumps the
321 	 * generation counter that we check below.
322 	 */
323 	if (unlikely(time_is_before_jiffies(READ_ONCE(base_crng.birth) + crng_reseed_interval())))
324 		crng_reseed();
325 
326 	local_lock_irqsave(&crngs.lock, flags);
327 	crng = raw_cpu_ptr(&crngs);
328 
329 	/*
330 	 * If our per-cpu crng is older than the base_crng, then it means
331 	 * somebody reseeded the base_crng. In that case, we do fast key
332 	 * erasure on the base_crng, and use its output as the new key
333 	 * for our per-cpu crng. This brings us up to date with base_crng.
334 	 */
335 	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
336 		spin_lock(&base_crng.lock);
337 		crng_fast_key_erasure(base_crng.key, chacha_state,
338 				      crng->key, sizeof(crng->key));
339 		crng->generation = base_crng.generation;
340 		spin_unlock(&base_crng.lock);
341 	}
342 
343 	/*
344 	 * Finally, when we've made it this far, our per-cpu crng has an up
345 	 * to date key, and we can do fast key erasure with it to produce
346 	 * some random data and a ChaCha state for the caller. All other
347 	 * branches of this function are "unlikely", so most of the time we
348 	 * should wind up here immediately.
349 	 */
350 	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
351 	local_unlock_irqrestore(&crngs.lock, flags);
352 }
353 
354 static void _get_random_bytes(void *buf, size_t len)
355 {
356 	u32 chacha_state[CHACHA_STATE_WORDS];
357 	u8 tmp[CHACHA_BLOCK_SIZE];
358 	size_t first_block_len;
359 
360 	if (!len)
361 		return;
362 
363 	first_block_len = min_t(size_t, 32, len);
364 	crng_make_state(chacha_state, buf, first_block_len);
365 	len -= first_block_len;
366 	buf += first_block_len;
367 
368 	while (len) {
369 		if (len < CHACHA_BLOCK_SIZE) {
370 			chacha20_block(chacha_state, tmp);
371 			memcpy(buf, tmp, len);
372 			memzero_explicit(tmp, sizeof(tmp));
373 			break;
374 		}
375 
376 		chacha20_block(chacha_state, buf);
377 		if (unlikely(chacha_state[12] == 0))
378 			++chacha_state[13];
379 		len -= CHACHA_BLOCK_SIZE;
380 		buf += CHACHA_BLOCK_SIZE;
381 	}
382 
383 	memzero_explicit(chacha_state, sizeof(chacha_state));
384 }
385 
386 /*
387  * This function is the exported kernel interface. It returns some number of
388  * good random numbers, suitable for key generation, seeding TCP sequence
389  * numbers, etc. In order to ensure that the randomness returned by this
390  * function is okay, the function wait_for_random_bytes() should be called and
391  * return 0 at least once at any point prior.
392  */
393 void get_random_bytes(void *buf, size_t len)
394 {
395 	warn_unseeded_randomness();
396 	_get_random_bytes(buf, len);
397 }
398 EXPORT_SYMBOL(get_random_bytes);
399 
400 static ssize_t get_random_bytes_user(struct iov_iter *iter)
401 {
402 	u32 chacha_state[CHACHA_STATE_WORDS];
403 	u8 block[CHACHA_BLOCK_SIZE];
404 	size_t ret = 0, copied;
405 
406 	if (unlikely(!iov_iter_count(iter)))
407 		return 0;
408 
409 	/*
410 	 * Immediately overwrite the ChaCha key at index 4 with random
411 	 * bytes, in case userspace causes copy_to_iter() below to sleep
412 	 * forever, so that we still retain forward secrecy in that case.
413 	 */
414 	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
415 	/*
416 	 * However, if we're doing a read of len <= 32, we don't need to
417 	 * use chacha_state afterward, so we can simply return those bytes to
418 	 * the user directly.
419 	 */
420 	if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
421 		ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
422 		goto out_zero_chacha;
423 	}
424 
425 	for (;;) {
426 		chacha20_block(chacha_state, block);
427 		if (unlikely(chacha_state[12] == 0))
428 			++chacha_state[13];
429 
430 		copied = copy_to_iter(block, sizeof(block), iter);
431 		ret += copied;
432 		if (!iov_iter_count(iter) || copied != sizeof(block))
433 			break;
434 
435 		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
436 		if (ret % PAGE_SIZE == 0) {
437 			if (signal_pending(current))
438 				break;
439 			cond_resched();
440 		}
441 	}
442 
443 	memzero_explicit(block, sizeof(block));
444 out_zero_chacha:
445 	memzero_explicit(chacha_state, sizeof(chacha_state));
446 	return ret ? ret : -EFAULT;
447 }
448 
449 /*
450  * Batched entropy returns random integers. The quality of the random
451  * numbers is as good as /dev/urandom's. In order to ensure that the randomness
452  * provided by this function is okay, the function wait_for_random_bytes()
453  * should be called and return 0 at least once at any point prior.
454  */
455 
456 #define DEFINE_BATCHED_ENTROPY(type)						\
457 struct batch_ ##type {								\
458 	/*									\
459 	 * We make this 1.5x a ChaCha block, so that we get the			\
460 	 * remaining 32 bytes from fast key erasure, plus one full		\
461 	 * block from the detached ChaCha state. We can increase		\
462 	 * the size of this later if needed so long as we keep the		\
463 	 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.		\
464 	 */									\
465 	type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))];		\
466 	local_lock_t lock;							\
467 	unsigned long generation;						\
468 	unsigned int position;							\
469 };										\
470 										\
471 static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = {	\
472 	.lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock),			\
473 	.position = UINT_MAX							\
474 };										\
475 										\
476 type get_random_ ##type(void)							\
477 {										\
478 	type ret;								\
479 	unsigned long flags;							\
480 	struct batch_ ##type *batch;						\
481 	unsigned long next_gen;							\
482 										\
483 	warn_unseeded_randomness();						\
484 										\
485 	if (!crng_ready()) {							\
486 		_get_random_bytes(&ret, sizeof(ret));				\
487 		return ret;							\
488 	}									\
489 										\
490 	local_lock_irqsave(&batched_entropy_ ##type.lock, flags);		\
491 	batch = raw_cpu_ptr(&batched_entropy_##type);				\
492 										\
493 	next_gen = READ_ONCE(base_crng.generation);				\
494 	if (batch->position >= ARRAY_SIZE(batch->entropy) ||			\
495 	    next_gen != batch->generation) {					\
496 		_get_random_bytes(batch->entropy, sizeof(batch->entropy));	\
497 		batch->position = 0;						\
498 		batch->generation = next_gen;					\
499 	}									\
500 										\
501 	ret = batch->entropy[batch->position];					\
502 	batch->entropy[batch->position] = 0;					\
503 	++batch->position;							\
504 	local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags);		\
505 	return ret;								\
506 }										\
507 EXPORT_SYMBOL(get_random_ ##type);
508 
509 DEFINE_BATCHED_ENTROPY(u8)
510 DEFINE_BATCHED_ENTROPY(u16)
511 DEFINE_BATCHED_ENTROPY(u32)
512 DEFINE_BATCHED_ENTROPY(u64)
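
/*
 * With CHACHA_BLOCK_SIZE = 64, each batch above holds 96 bytes (1.5
 * blocks) of entropy: 96 u8 values, 48 u16s, 24 u32s, or 12 u64s per
 * refill, each handed out once and zeroed immediately after use.
 */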
513 
514 #ifdef CONFIG_SMP
515 /*
516  * This function is called when the CPU is coming up, with entry
517  * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
518  */
519 int __cold random_prepare_cpu(unsigned int cpu)
520 {
521 	/*
522 	 * When the cpu comes back online, immediately invalidate both
523 	 * the per-cpu crng and all batches, so that we serve fresh
524 	 * randomness.
525 	 */
526 	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
527 	per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
528 	per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
529 	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
530 	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
531 	return 0;
532 }
533 #endif
534 
535 
536 /**********************************************************************
537  *
538  * Entropy accumulation and extraction routines.
539  *
540  * Callers may add entropy via:
541  *
542  *     static void mix_pool_bytes(const void *buf, size_t len)
543  *
544  * After which, if added entropy should be credited:
545  *
546  *     static void credit_init_bits(size_t bits)
547  *
548  * Finally, extract entropy via:
549  *
550  *     static void extract_entropy(void *buf, size_t len)
551  *
552  **********************************************************************/
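
/*
 * A typical sequence, as a sketch (the sample buffer and bit estimate
 * are hypothetical; the helpers take input_pool.lock as needed):
 *
 *	mix_pool_bytes(sample, len);	// always safe; credits nothing
 *	credit_init_bits(bits);		// only for real measured entropy
 *	extract_entropy(key, 32);	// later, derive a new crng key
 */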
553 
554 enum {
555 	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
556 	POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
557 	POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
558 };
559 
560 static struct {
561 	struct blake2s_state hash;
562 	spinlock_t lock;
563 	unsigned int init_bits;
564 } input_pool = {
565 	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
566 		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
567 		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
568 	.hash.outlen = BLAKE2S_HASH_SIZE,
569 	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
570 };
571 
572 static void _mix_pool_bytes(const void *buf, size_t len)
573 {
574 	blake2s_update(&input_pool.hash, buf, len);
575 }
576 
577 /*
578  * This function adds bytes into the input pool. It does not
579  * update the initialization bit counter; the caller should call
580  * credit_init_bits if this is appropriate.
581  */
582 static void mix_pool_bytes(const void *buf, size_t len)
583 {
584 	unsigned long flags;
585 
586 	spin_lock_irqsave(&input_pool.lock, flags);
587 	_mix_pool_bytes(buf, len);
588 	spin_unlock_irqrestore(&input_pool.lock, flags);
589 }
590 
591 /*
592  * This is an HKDF-like construction for using the hashed collected entropy
593  * as a PRF key, that's then expanded block-by-block.
594  */
595 static void extract_entropy(void *buf, size_t len)
596 {
597 	unsigned long flags;
598 	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
599 	struct {
600 		unsigned long rdseed[32 / sizeof(long)];
601 		size_t counter;
602 	} block;
603 	size_t i, longs;
604 
605 	for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
606 		longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
607 		if (longs) {
608 			i += longs;
609 			continue;
610 		}
611 		longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
612 		if (longs) {
613 			i += longs;
614 			continue;
615 		}
616 		block.rdseed[i++] = random_get_entropy();
617 	}
618 
619 	spin_lock_irqsave(&input_pool.lock, flags);
620 
621 	/* seed = HASHPRF(last_key, entropy_input) */
622 	blake2s_final(&input_pool.hash, seed);
623 
624 	/* next_key = HASHPRF(seed, RDSEED || 0) */
625 	block.counter = 0;
626 	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
627 	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
628 
629 	spin_unlock_irqrestore(&input_pool.lock, flags);
630 	memzero_explicit(next_key, sizeof(next_key));
631 
632 	while (len) {
633 		i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
634 		/* output = HASHPRF(seed, RDSEED || ++counter) */
635 		++block.counter;
636 		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
637 		len -= i;
638 		buf += i;
639 	}
640 
641 	memzero_explicit(seed, sizeof(seed));
642 	memzero_explicit(&block, sizeof(block));
643 }
644 
645 #define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
646 
647 static void __cold _credit_init_bits(size_t bits)
648 {
649 	static struct execute_work set_ready;
650 	unsigned int new, orig, add;
651 	unsigned long flags;
652 
653 	if (!bits)
654 		return;
655 
656 	add = min_t(size_t, bits, POOL_BITS);
657 
658 	orig = READ_ONCE(input_pool.init_bits);
659 	do {
660 		new = min_t(unsigned int, POOL_BITS, orig + add);
661 	} while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
662 
663 	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
664 		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
665 		if (static_key_initialized)
666 			execute_in_process_context(crng_set_ready, &set_ready);
667 		wake_up_interruptible(&crng_init_wait);
668 		kill_fasync(&fasync, SIGIO, POLL_IN);
669 		pr_notice("crng init done\n");
670 		if (urandom_warning.missed)
671 			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
672 				  urandom_warning.missed);
673 	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
674 		spin_lock_irqsave(&base_crng.lock, flags);
675 		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
676 		if (crng_init == CRNG_EMPTY) {
677 			extract_entropy(base_crng.key, sizeof(base_crng.key));
678 			crng_init = CRNG_EARLY;
679 		}
680 		spin_unlock_irqrestore(&base_crng.lock, flags);
681 	}
682 }
683 
684 
685 /**********************************************************************
686  *
687  * Entropy collection routines.
688  *
689  * The following exported functions are used for pushing entropy into
690  * the above entropy accumulation routines:
691  *
692  *	void add_device_randomness(const void *buf, size_t len);
693  *	void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
694  *	void add_bootloader_randomness(const void *buf, size_t len);
695  *	void add_vmfork_randomness(const void *unique_vm_id, size_t len);
696  *	void add_interrupt_randomness(int irq);
697  *	void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
698  *	void add_disk_randomness(struct gendisk *disk);
699  *
700  * add_device_randomness() adds data to the input pool that
701  * is likely to differ between two devices (or possibly even per boot).
702  * This would be things like MAC addresses or serial numbers, or the
703  * read-out of the RTC. This does *not* credit any actual entropy to
704  * the pool, but it initializes the pool to different values for devices
705  * that might otherwise be identical and have very little entropy
706  * available to them (particularly common in the embedded world).
707  *
708  * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
709  * entropy as specified by the caller. If the entropy pool is full it will
710  * block until more entropy is needed.
711  *
712  * add_bootloader_randomness() is called by bootloader drivers, such as EFI
713  * and device tree, and credits its input depending on whether or not the
714  * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
715  *
716  * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
717  * representing the current instance of a VM to the pool, without crediting,
718  * and then force-reseeds the crng so that it takes effect immediately.
719  *
720  * add_interrupt_randomness() uses the interrupt timing as random
721  * inputs to the entropy pool. Using the cycle counters and the irq source
722  * as inputs, it feeds the input pool roughly once a second or after 64
723  * interrupts, crediting 1 bit of entropy for whichever comes first.
724  *
725  * add_input_randomness() uses the input layer interrupt timing, as well
726  * as the event type information from the hardware.
727  *
728  * add_disk_randomness() uses what amounts to the seek time of block
729  * layer request events, on a per-disk_devt basis, as input to the
730  * entropy pool. Note that high-speed solid state drives with very low
731  * seek times do not make for good sources of entropy, as their seek
732  * times are usually fairly consistent.
733  *
734  * The last two routines try to estimate how many bits of entropy
735  * to credit. They do this by keeping track of the first and second
736  * order deltas of the event timings.
737  *
738  **********************************************************************/
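
/*
 * For instance, a hypothetical NIC driver could seed per-device state
 * like so (a sketch; no entropy is credited for this):
 *
 *	add_device_randomness(dev->dev_addr, ETH_ALEN);
 *	add_device_randomness(&serial, sizeof(serial));
 */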
739 
740 static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
741 static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
742 static int __init parse_trust_cpu(char *arg)
743 {
744 	return kstrtobool(arg, &trust_cpu);
745 }
746 static int __init parse_trust_bootloader(char *arg)
747 {
748 	return kstrtobool(arg, &trust_bootloader);
749 }
750 early_param("random.trust_cpu", parse_trust_cpu);
751 early_param("random.trust_bootloader", parse_trust_bootloader);
752 
753 static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
754 {
755 	unsigned long flags, entropy = random_get_entropy();
756 
757 	/*
758 	 * Encode a representation of how long the system has been suspended,
759 	 * in a way that is distinct from prior system suspends.
760 	 */
761 	ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };
762 
763 	spin_lock_irqsave(&input_pool.lock, flags);
764 	_mix_pool_bytes(&action, sizeof(action));
765 	_mix_pool_bytes(stamps, sizeof(stamps));
766 	_mix_pool_bytes(&entropy, sizeof(entropy));
767 	spin_unlock_irqrestore(&input_pool.lock, flags);
768 
769 	if (crng_ready() && (action == PM_RESTORE_PREPARE ||
770 	    (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
771 	     !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
772 		crng_reseed();
773 		pr_notice("crng reseeded on system resumption\n");
774 	}
775 	return 0;
776 }
777 
778 static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
779 
780 /*
781  * This is called extremely early, before timekeeping functionality is
782  * available, but arch randomness is. Interrupts are not yet enabled.
783  */
784 void __init random_init_early(const char *command_line)
785 {
786 	unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
787 	size_t i, longs, arch_bits;
788 
789 #if defined(LATENT_ENTROPY_PLUGIN)
790 	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
791 	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
792 #endif
793 
794 	for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
795 		longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
796 		if (longs) {
797 			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
798 			i += longs;
799 			continue;
800 		}
801 		longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
802 		if (longs) {
803 			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
804 			i += longs;
805 			continue;
806 		}
807 		arch_bits -= sizeof(*entropy) * 8;
808 		++i;
809 	}
810 
811 	_mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
812 	_mix_pool_bytes(command_line, strlen(command_line));
813 
814 	/* Reseed if already seeded by earlier phases. */
815 	if (crng_ready())
816 		crng_reseed();
817 	else if (trust_cpu)
818 		_credit_init_bits(arch_bits);
819 }
820 
821 /*
822  * This is called a little bit after the prior function, and now there is
823  * access to timestamp counters. Interrupts are not yet enabled.
824  */
825 void __init random_init(void)
826 {
827 	unsigned long entropy = random_get_entropy();
828 	ktime_t now = ktime_get_real();
829 
830 	_mix_pool_bytes(&now, sizeof(now));
831 	_mix_pool_bytes(&entropy, sizeof(entropy));
832 	add_latent_entropy();
833 
834 	/*
835 	 * If we were initialized by the cpu or bootloader before jump labels
836 	 * are initialized, then we should enable the static branch here, where
837 	 * it's guaranteed that jump labels have been initialized.
838 	 */
839 	if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
840 		crng_set_ready(NULL);
841 
842 	/* Reseed if already seeded by earlier phases. */
843 	if (crng_ready())
844 		crng_reseed();
845 
846 	WARN_ON(register_pm_notifier(&pm_notifier));
847 
848 	WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
849 		       "entropy collection will consequently suffer.");
850 }
851 
852 /*
853  * Add device- or boot-specific data to the input pool to help
854  * initialize it.
855  *
856  * None of this adds any entropy; it is meant to avoid the problem of
857  * the entropy pool having similar initial state across largely
858  * identical devices.
859  */
860 void add_device_randomness(const void *buf, size_t len)
861 {
862 	unsigned long entropy = random_get_entropy();
863 	unsigned long flags;
864 
865 	spin_lock_irqsave(&input_pool.lock, flags);
866 	_mix_pool_bytes(&entropy, sizeof(entropy));
867 	_mix_pool_bytes(buf, len);
868 	spin_unlock_irqrestore(&input_pool.lock, flags);
869 }
870 EXPORT_SYMBOL(add_device_randomness);
871 
872 /*
873  * Interface for in-kernel drivers of true hardware RNGs.
874  * Those devices may produce endless random bits and will be throttled
875  * when our pool is full.
876  */
877 void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
878 {
879 	mix_pool_bytes(buf, len);
880 	credit_init_bits(entropy);
881 
882 	/*
883 	 * Throttle writing to once every reseed interval, unless we're not yet
884 	 * initialized and the write actually credits entropy.
885 	 */
886 	if (!kthread_should_stop() && (crng_ready() || !entropy))
887 		schedule_timeout_interruptible(crng_reseed_interval());
888 }
889 EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
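
/*
 * A sketch of a feeder loop for such a driver (the read helper, buffer,
 * and conservative half-bit-per-bit estimate are hypothetical):
 *
 *	while (!kthread_should_stop()) {
 *		len = my_hwrng_read(buf, sizeof(buf));
 *		add_hwgenerator_randomness(buf, len, len * 8 / 2);
 *	}
 *
 * No extra throttling is needed; the call itself sleeps for a reseed
 * interval once the pool is initialized.
 */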
890 
891 /*
892  * Handle random seed passed by bootloader, and credit it if
893  * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
894  */
895 void __init add_bootloader_randomness(const void *buf, size_t len)
896 {
897 	mix_pool_bytes(buf, len);
898 	if (trust_bootloader)
899 		credit_init_bits(len * 8);
900 }
901 
902 #if IS_ENABLED(CONFIG_VMGENID)
903 static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
904 
905 /*
906  * Handle a new VM ID, which is unique but not secret, so we don't
907  * credit it, but we do immediately force a reseed afterward so
908  * that it's used by the crng posthaste.
909  */
910 void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
911 {
912 	add_device_randomness(unique_vm_id, len);
913 	if (crng_ready()) {
914 		crng_reseed();
915 		pr_notice("crng reseeded due to virtual machine fork\n");
916 	}
917 	blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
918 }
919 #if IS_MODULE(CONFIG_VMGENID)
920 EXPORT_SYMBOL_GPL(add_vmfork_randomness);
921 #endif
922 
923 int __cold register_random_vmfork_notifier(struct notifier_block *nb)
924 {
925 	return blocking_notifier_chain_register(&vmfork_chain, nb);
926 }
927 EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
928 
929 int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
930 {
931 	return blocking_notifier_chain_unregister(&vmfork_chain, nb);
932 }
933 EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
934 #endif
935 
936 struct fast_pool {
937 	unsigned long pool[4];
938 	unsigned long last;
939 	unsigned int count;
940 	struct timer_list mix;
941 };
942 
943 static void mix_interrupt_randomness(struct timer_list *work);
944 
945 static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
946 #ifdef CONFIG_64BIT
947 #define FASTMIX_PERM SIPHASH_PERMUTATION
948 	.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
949 #else
950 #define FASTMIX_PERM HSIPHASH_PERMUTATION
951 	.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
952 #endif
953 	.mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
954 };
955 
956 /*
957  * This is [Half]SipHash-1-x, starting from an empty key. Because
958  * the key is fixed, it assumes that its inputs are non-malicious,
959  * and therefore this has no security on its own. s represents the
960  * four-word SipHash state, while v represents a two-word input.
961  */
962 static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
963 {
964 	s[3] ^= v1;
965 	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
966 	s[0] ^= v1;
967 	s[3] ^= v2;
968 	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
969 	s[0] ^= v2;
970 }
971 
972 #ifdef CONFIG_SMP
973 /*
974  * This function is called when the CPU has just come online, with
975  * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
976  */
977 int __cold random_online_cpu(unsigned int cpu)
978 {
979 	/*
980 	 * During CPU shutdown and before CPU onlining, add_interrupt_
981 	 * randomness() may schedule mix_interrupt_randomness(), and
982 	 * set the MIX_INFLIGHT flag. However, because the worker can
983 	 * be scheduled on a different CPU during this period, that
984 	 * flag will never be cleared. For that reason, we zero out
985 	 * the flag here, which runs just after workqueues are onlined
986 	 * for the CPU again. This also has the effect of setting the
987 	 * irq randomness count to zero so that new accumulated irqs
988 	 * are fresh.
989 	 */
990 	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
991 	return 0;
992 }
993 #endif
994 
995 static void mix_interrupt_randomness(struct timer_list *work)
996 {
997 	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
998 	/*
999 	 * The size of the copied stack pool is explicitly 2 longs so that we
1000 	 * only ever ingest half of the siphash output each time, retaining
1001 	 * the other half as the next "key" that carries over. The entropy is
1002 	 * supposed to be sufficiently dispersed between bits so on average
1003 	 * we don't wind up "losing" some.
1004 	 */
1005 	unsigned long pool[2];
1006 	unsigned int count;
1007 
1008 	/* Check to see if we're running on the wrong CPU due to hotplug. */
1009 	local_irq_disable();
1010 	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
1011 		local_irq_enable();
1012 		return;
1013 	}
1014 
1015 	/*
1016 	 * Copy the pool to the stack so that the mixer always has a
1017 	 * consistent view, before we reenable irqs again.
1018 	 */
1019 	memcpy(pool, fast_pool->pool, sizeof(pool));
1020 	count = fast_pool->count;
1021 	fast_pool->count = 0;
1022 	fast_pool->last = jiffies;
1023 	local_irq_enable();
1024 
1025 	mix_pool_bytes(pool, sizeof(pool));
1026 	credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
1027 
1028 	memzero_explicit(pool, sizeof(pool));
1029 }
1030 
1031 void add_interrupt_randomness(int irq)
1032 {
1033 	enum { MIX_INFLIGHT = 1U << 31 };
1034 	unsigned long entropy = random_get_entropy();
1035 	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
1036 	struct pt_regs *regs = get_irq_regs();
1037 	unsigned int new_count;
1038 
1039 	fast_mix(fast_pool->pool, entropy,
1040 		 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
1041 	new_count = ++fast_pool->count;
1042 
1043 	if (new_count & MIX_INFLIGHT)
1044 		return;
1045 
1046 	if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
1047 		return;
1048 
1049 	fast_pool->count |= MIX_INFLIGHT;
1050 	if (!timer_pending(&fast_pool->mix)) {
1051 		fast_pool->mix.expires = jiffies;
1052 		add_timer_on(&fast_pool->mix, raw_smp_processor_id());
1053 	}
1054 }
1055 EXPORT_SYMBOL_GPL(add_interrupt_randomness);
1056 
1057 /* There is one of these per entropy source */
1058 struct timer_rand_state {
1059 	unsigned long last_time;
1060 	long last_delta, last_delta2;
1061 };
1062 
1063 /*
1064  * This function adds entropy to the entropy "pool" by using timing
1065  * delays. It uses the timer_rand_state structure to make an estimate
1066  * of how many bits of entropy this call has added to the pool. The
1067  * value "num" is also added to the pool; it should somehow describe
1068  * the type of event that just happened.
1069  */
1070 static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
1071 {
1072 	unsigned long entropy = random_get_entropy(), now = jiffies, flags;
1073 	long delta, delta2, delta3;
1074 	unsigned int bits;
1075 
1076 	/*
1077 	 * If we're in a hard IRQ, add_interrupt_randomness() will be called
1078 	 * sometime after, so mix into the fast pool.
1079 	 */
1080 	if (in_hardirq()) {
1081 		fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
1082 	} else {
1083 		spin_lock_irqsave(&input_pool.lock, flags);
1084 		_mix_pool_bytes(&entropy, sizeof(entropy));
1085 		_mix_pool_bytes(&num, sizeof(num));
1086 		spin_unlock_irqrestore(&input_pool.lock, flags);
1087 	}
1088 
1089 	if (crng_ready())
1090 		return;
1091 
1092 	/*
1093 	 * Calculate number of bits of randomness we probably added.
1094 	 * We take into account the first, second and third-order deltas
1095 	 * in order to make our estimate.
1096 	 */
1097 	delta = now - READ_ONCE(state->last_time);
1098 	WRITE_ONCE(state->last_time, now);
1099 
1100 	delta2 = delta - READ_ONCE(state->last_delta);
1101 	WRITE_ONCE(state->last_delta, delta);
1102 
1103 	delta3 = delta2 - READ_ONCE(state->last_delta2);
1104 	WRITE_ONCE(state->last_delta2, delta2);
1105 
1106 	if (delta < 0)
1107 		delta = -delta;
1108 	if (delta2 < 0)
1109 		delta2 = -delta2;
1110 	if (delta3 < 0)
1111 		delta3 = -delta3;
1112 	if (delta > delta2)
1113 		delta = delta2;
1114 	if (delta > delta3)
1115 		delta = delta3;
1116 
1117 	/*
1118 	 * delta is now the minimum absolute delta. Round down by 1 bit
1119 	 * on general principles, and limit entropy estimate to 11 bits.
1120 	 */
1121 	bits = min(fls(delta >> 1), 11);
1122 
1123 	/*
1124 	 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
1125 	 * will run after this, which uses a different crediting scheme of 1 bit
1126 	 * per every 64 interrupts. In order to let that function do accounting
1127 	 * close to the one in this function, we credit a full 64/64 bit per bit,
1128 	 * and then subtract one to account for the extra one added.
1129 	 */
1130 	if (in_hardirq())
1131 		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
1132 	else
1133 		_credit_init_bits(bits);
1134 }
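
/*
 * Worked example of the estimate above: for event times 100, 110, 130,
 * 170 (in jiffies), the final event sees delta = 40, delta2 = 40 - 20 =
 * 20, and delta3 = 20 - 10 = 10. The minimum absolute delta is 10, so
 * we credit min(fls(10 >> 1), 11) = 3 bits.
 */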
1135 
1136 void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
1137 {
1138 	static unsigned char last_value;
1139 	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
1140 
1141 	/* Ignore autorepeat and the like. */
1142 	if (value == last_value)
1143 		return;
1144 
1145 	last_value = value;
1146 	add_timer_randomness(&input_timer_state,
1147 			     (type << 4) ^ code ^ (code >> 4) ^ value);
1148 }
1149 EXPORT_SYMBOL_GPL(add_input_randomness);
1150 
1151 #ifdef CONFIG_BLOCK
1152 void add_disk_randomness(struct gendisk *disk)
1153 {
1154 	if (!disk || !disk->random)
1155 		return;
1156 	/* First major is 1, so we get >= 0x200 here. */
1157 	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1158 }
1159 EXPORT_SYMBOL_GPL(add_disk_randomness);
1160 
1161 void __cold rand_initialize_disk(struct gendisk *disk)
1162 {
1163 	struct timer_rand_state *state;
1164 
1165 	/*
1166 	 * If kzalloc returns null, we just won't use that entropy
1167 	 * source.
1168 	 */
1169 	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1170 	if (state) {
1171 		state->last_time = INITIAL_JIFFIES;
1172 		disk->random = state;
1173 	}
1174 }
1175 #endif
1176 
1177 struct entropy_timer_state {
1178 	unsigned long entropy;
1179 	struct timer_list timer;
1180 	unsigned int samples, samples_per_bit;
1181 };
1182 
1183 /*
1184  * Each time the timer fires, we expect that we got an unpredictable
1185  * jump in the cycle counter. Even if the timer is running on another
1186  * CPU, the timer activity will be touching the stack of the CPU that is
1187  * generating entropy.
1188  *
1189  * Note that we don't re-arm the timer in the timer itself - we are
1190  * happy to be scheduled away, since that just makes the load more
1191  * complex, but we do not want the timer to keep ticking unless the
1192  * entropy loop is running.
1193  *
1194  * So the re-arming always happens in the entropy loop itself.
1195  */
1196 static void __cold entropy_timer(struct timer_list *timer)
1197 {
1198 	struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
1199 
1200 	if (++state->samples == state->samples_per_bit) {
1201 		credit_init_bits(1);
1202 		state->samples = 0;
1203 	}
1204 }
1205 
1206 /*
1207  * If we have an actual cycle counter, see if we can
1208  * generate enough entropy with timing noise.
1209  */
1210 static void __cold try_to_generate_entropy(void)
1211 {
1212 	enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
1213 	struct entropy_timer_state stack;
1214 	unsigned int i, num_different = 0;
1215 	unsigned long last = random_get_entropy();
1216 
1217 	for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
1218 		stack.entropy = random_get_entropy();
1219 		if (stack.entropy != last)
1220 			++num_different;
1221 		last = stack.entropy;
1222 	}
1223 	stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
1224 	if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT)
1225 		return;
1226 
1227 	stack.samples = 0;
1228 	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
1229 	while (!crng_ready() && !signal_pending(current)) {
1230 		if (!timer_pending(&stack.timer))
1231 			mod_timer(&stack.timer, jiffies);
1232 		mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
1233 		schedule();
1234 		stack.entropy = random_get_entropy();
1235 	}
1236 
1237 	del_timer_sync(&stack.timer);
1238 	destroy_timer_on_stack(&stack.timer);
1239 	mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
1240 }
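
/*
 * Worked example of the calibration above: if 4095 of the 8191 trial
 * reads differed from their predecessor, then samples_per_bit =
 * DIV_ROUND_UP(8192, 4096) = 2, i.e. the timer credits one bit for
 * every two samples it takes.
 */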
1241 
1242 
1243 /**********************************************************************
1244  *
1245  * Userspace reader/writer interfaces.
1246  *
1247  * getrandom(2) is the primary modern interface into the RNG and should
1248  * be used in preference to anything else.
1249  *
1250  * Reading from /dev/random has the same functionality as calling
1251  * getrandom(2) with flags=0. In earlier versions, however, it had
1252  * vastly different semantics and should therefore be avoided, to
1253  * prevent backwards compatibility issues.
1254  *
1255  * Reading from /dev/urandom has the same functionality as calling
1256  * getrandom(2) with flags=GRND_INSECURE. Because it does not block
1257  * waiting for the RNG to be ready, it should not be used.
1258  *
1259  * Writing to either /dev/random or /dev/urandom adds entropy to
1260  * the input pool but does not credit it.
1261  *
1262  * Polling on /dev/random indicates when the RNG is initialized, on
1263  * the read side, and when it wants new entropy, on the write side.
1264  *
1265  * Both /dev/random and /dev/urandom have the same set of ioctls for
1266  * adding entropy, getting the entropy count, zeroing the count, and
1267  * reseeding the crng.
1268  *
1269  **********************************************************************/
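
/*
 * From userspace, the preferred pattern is a plain getrandom(2) call,
 * sketched here (error handling for signals is elided):
 *
 *	#include <sys/random.h>
 *
 *	unsigned char buf[32];
 *
 *	if (getrandom(buf, sizeof(buf), 0) != sizeof(buf))
 *		abort();
 */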
1270 
1271 SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
1272 {
1273 	struct iov_iter iter;
1274 	struct iovec iov;
1275 	int ret;
1276 
1277 	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
1278 		return -EINVAL;
1279 
1280 	/*
1281 	 * Requesting insecure and blocking randomness at the same time makes
1282 	 * no sense.
1283 	 */
1284 	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
1285 		return -EINVAL;
1286 
1287 	if (!crng_ready() && !(flags & GRND_INSECURE)) {
1288 		if (flags & GRND_NONBLOCK)
1289 			return -EAGAIN;
1290 		ret = wait_for_random_bytes();
1291 		if (unlikely(ret))
1292 			return ret;
1293 	}
1294 
1295 	ret = import_single_range(READ, ubuf, len, &iov, &iter);
1296 	if (unlikely(ret))
1297 		return ret;
1298 	return get_random_bytes_user(&iter);
1299 }
1300 
1301 static __poll_t random_poll(struct file *file, poll_table *wait)
1302 {
1303 	poll_wait(file, &crng_init_wait, wait);
1304 	return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
1305 }
1306 
1307 static ssize_t write_pool_user(struct iov_iter *iter)
1308 {
1309 	u8 block[BLAKE2S_BLOCK_SIZE];
1310 	ssize_t ret = 0;
1311 	size_t copied;
1312 
1313 	if (unlikely(!iov_iter_count(iter)))
1314 		return 0;
1315 
1316 	for (;;) {
1317 		copied = copy_from_iter(block, sizeof(block), iter);
1318 		ret += copied;
1319 		mix_pool_bytes(block, copied);
1320 		if (!iov_iter_count(iter) || copied != sizeof(block))
1321 			break;
1322 
1323 		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
1324 		if (ret % PAGE_SIZE == 0) {
1325 			if (signal_pending(current))
1326 				break;
1327 			cond_resched();
1328 		}
1329 	}
1330 
1331 	memzero_explicit(block, sizeof(block));
1332 	return ret ? ret : -EFAULT;
1333 }
1334 
1335 static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
1336 {
1337 	return write_pool_user(iter);
1338 }
1339 
1340 static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1341 {
1342 	static int maxwarn = 10;
1343 
1344 	/*
1345 	 * Opportunistically attempt to initialize the RNG on platforms that
1346 	 * have fast cycle counters, but don't (for now) require it to succeed.
1347 	 */
1348 	if (!crng_ready())
1349 		try_to_generate_entropy();
1350 
1351 	if (!crng_ready()) {
1352 		if (!ratelimit_disable && maxwarn <= 0)
1353 			++urandom_warning.missed;
1354 		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
1355 			--maxwarn;
1356 			pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
1357 				  current->comm, iov_iter_count(iter));
1358 		}
1359 	}
1360 
1361 	return get_random_bytes_user(iter);
1362 }
1363 
1364 static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1365 {
1366 	int ret;
1367 
1368 	if (!crng_ready() &&
1369 	    ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
1370 	     (kiocb->ki_filp->f_flags & O_NONBLOCK)))
1371 		return -EAGAIN;
1372 
1373 	ret = wait_for_random_bytes();
1374 	if (ret != 0)
1375 		return ret;
1376 	return get_random_bytes_user(iter);
1377 }
1378 
1379 static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1380 {
1381 	int __user *p = (int __user *)arg;
1382 	int ent_count;
1383 
1384 	switch (cmd) {
1385 	case RNDGETENTCNT:
1386 		/* Inherently racy, no point locking. */
1387 		if (put_user(input_pool.init_bits, p))
1388 			return -EFAULT;
1389 		return 0;
1390 	case RNDADDTOENTCNT:
1391 		if (!capable(CAP_SYS_ADMIN))
1392 			return -EPERM;
1393 		if (get_user(ent_count, p))
1394 			return -EFAULT;
1395 		if (ent_count < 0)
1396 			return -EINVAL;
1397 		credit_init_bits(ent_count);
1398 		return 0;
1399 	case RNDADDENTROPY: {
1400 		struct iov_iter iter;
1401 		struct iovec iov;
1402 		ssize_t ret;
1403 		int len;
1404 
1405 		if (!capable(CAP_SYS_ADMIN))
1406 			return -EPERM;
1407 		if (get_user(ent_count, p++))
1408 			return -EFAULT;
1409 		if (ent_count < 0)
1410 			return -EINVAL;
1411 		if (get_user(len, p++))
1412 			return -EFAULT;
1413 		ret = import_single_range(WRITE, p, len, &iov, &iter);
1414 		if (unlikely(ret))
1415 			return ret;
1416 		ret = write_pool_user(&iter);
1417 		if (unlikely(ret < 0))
1418 			return ret;
1419 		/* Since we're crediting, enforce that it was all written into the pool. */
1420 		if (unlikely(ret != len))
1421 			return -EFAULT;
1422 		credit_init_bits(ent_count);
1423 		return 0;
1424 	}
1425 	case RNDZAPENTCNT:
1426 	case RNDCLEARPOOL:
1427 		/* No longer has any effect. */
1428 		if (!capable(CAP_SYS_ADMIN))
1429 			return -EPERM;
1430 		return 0;
1431 	case RNDRESEEDCRNG:
1432 		if (!capable(CAP_SYS_ADMIN))
1433 			return -EPERM;
1434 		if (!crng_ready())
1435 			return -ENODATA;
1436 		crng_reseed();
1437 		return 0;
1438 	default:
1439 		return -EINVAL;
1440 	}
1441 }
1442 
1443 static int random_fasync(int fd, struct file *filp, int on)
1444 {
1445 	return fasync_helper(fd, filp, on, &fasync);
1446 }
1447 
1448 const struct file_operations random_fops = {
1449 	.read_iter = random_read_iter,
1450 	.write_iter = random_write_iter,
1451 	.poll = random_poll,
1452 	.unlocked_ioctl = random_ioctl,
1453 	.compat_ioctl = compat_ptr_ioctl,
1454 	.fasync = random_fasync,
1455 	.llseek = noop_llseek,
1456 	.splice_read = generic_file_splice_read,
1457 	.splice_write = iter_file_splice_write,
1458 };
1459 
1460 const struct file_operations urandom_fops = {
1461 	.read_iter = urandom_read_iter,
1462 	.write_iter = random_write_iter,
1463 	.unlocked_ioctl = random_ioctl,
1464 	.compat_ioctl = compat_ptr_ioctl,
1465 	.fasync = random_fasync,
1466 	.llseek = noop_llseek,
1467 	.splice_read = generic_file_splice_read,
1468 	.splice_write = iter_file_splice_write,
1469 };
1470 
1471 
1472 /********************************************************************
1473  *
1474  * Sysctl interface.
1475  *
1476  * These are partly unused legacy knobs with dummy values to not break
1477  * userspace and partly still useful things. They are usually accessible
1478  * in /proc/sys/kernel/random/ and are as follows:
1479  *
1480  * - boot_id - a UUID representing the current boot.
1481  *
1482  * - uuid - a random UUID, different each time the file is read.
1483  *
1484  * - poolsize - the number of bits of entropy that the input pool can
1485  *   hold, tied to the POOL_BITS constant.
1486  *
1487  * - entropy_avail - the number of bits of entropy currently in the
1488  *   input pool. Always <= poolsize.
1489  *
1490  * - write_wakeup_threshold - the amount of entropy in the input pool
1491  *   below which write polls to /dev/random will unblock, requesting
1492  *   more entropy, tied to the POOL_READY_BITS constant. It is writable
1493  *   to avoid breaking old userspaces, but writing to it does not
1494  *   change any behavior of the RNG.
1495  *
1496  * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
1497  *   It is writable to avoid breaking old userspaces, but writing
1498  *   to it does not change any behavior of the RNG.
1499  *
1500  ********************************************************************/
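
/*
 * For example, with POOL_BITS = 256, a fully initialized system shows:
 *
 *	$ cat /proc/sys/kernel/random/poolsize
 *	256
 *	$ cat /proc/sys/kernel/random/entropy_avail
 *	256
 */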
1501 
1502 #ifdef CONFIG_SYSCTL
1503 
1504 #include <linux/sysctl.h>
1505 
1506 static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
1507 static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
1508 static int sysctl_poolsize = POOL_BITS;
1509 static u8 sysctl_bootid[UUID_SIZE];
1510 
1511 /*
1512  * This function is used to return both the boot-id UUID and a random
1513  * UUID. The difference is in whether table->data is NULL; if it is,
1514  * then a new UUID is generated and returned to the user.
1515  */
1516 static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
1517 			size_t *lenp, loff_t *ppos)
1518 {
1519 	u8 tmp_uuid[UUID_SIZE], *uuid;
1520 	char uuid_string[UUID_STRING_LEN + 1];
1521 	struct ctl_table fake_table = {
1522 		.data = uuid_string,
1523 		.maxlen = UUID_STRING_LEN
1524 	};
1525 
1526 	if (write)
1527 		return -EPERM;
1528 
1529 	uuid = table->data;
1530 	if (!uuid) {
1531 		uuid = tmp_uuid;
1532 		generate_random_uuid(uuid);
1533 	} else {
1534 		static DEFINE_SPINLOCK(bootid_spinlock);
1535 
1536 		spin_lock(&bootid_spinlock);
1537 		if (!uuid[8])
1538 			generate_random_uuid(uuid);
1539 		spin_unlock(&bootid_spinlock);
1540 	}
1541 
1542 	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
1543 	return proc_dostring(&fake_table, 0, buf, lenp, ppos);
1544 }
1545 
1546 /* The same as proc_dointvec, but writes don't change anything. */
1547 static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
1548 			    size_t *lenp, loff_t *ppos)
1549 {
1550 	return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
1551 }
1552 
1553 static struct ctl_table random_table[] = {
1554 	{
1555 		.procname	= "poolsize",
1556 		.data		= &sysctl_poolsize,
1557 		.maxlen		= sizeof(int),
1558 		.mode		= 0444,
1559 		.proc_handler	= proc_dointvec,
1560 	},
1561 	{
1562 		.procname	= "entropy_avail",
1563 		.data		= &input_pool.init_bits,
1564 		.maxlen		= sizeof(int),
1565 		.mode		= 0444,
1566 		.proc_handler	= proc_dointvec,
1567 	},
1568 	{
1569 		.procname	= "write_wakeup_threshold",
1570 		.data		= &sysctl_random_write_wakeup_bits,
1571 		.maxlen		= sizeof(int),
1572 		.mode		= 0644,
1573 		.proc_handler	= proc_do_rointvec,
1574 	},
1575 	{
1576 		.procname	= "urandom_min_reseed_secs",
1577 		.data		= &sysctl_random_min_urandom_seed,
1578 		.maxlen		= sizeof(int),
1579 		.mode		= 0644,
1580 		.proc_handler	= proc_do_rointvec,
1581 	},
1582 	{
1583 		.procname	= "boot_id",
1584 		.data		= &sysctl_bootid,
1585 		.mode		= 0444,
1586 		.proc_handler	= proc_do_uuid,
1587 	},
1588 	{
1589 		.procname	= "uuid",
1590 		.mode		= 0444,
1591 		.proc_handler	= proc_do_uuid,
1592 	},
1593 	{ }
1594 };
1595 
1596 /*
1597  * random_init() is called before sysctl_init(),
1598  * so we cannot call register_sysctl_init() in random_init().
1599  */
1600 static int __init random_sysctls_init(void)
1601 {
1602 	register_sysctl_init("kernel/random", random_table);
1603 	return 0;
1604 }
1605 device_initcall(random_sysctls_init);
1606 #endif
1607