xref: /freebsd/sys/dev/random/randomdev.c (revision fdafd315ad0d0f28a11b9fb4476a9ab059c62b92)
1  /*-
2   * Copyright (c) 2017 Oliver Pinter
3   * Copyright (c) 2000-2015 Mark R V Murray
4   * All rights reserved.
5   *
6   * Redistribution and use in source and binary forms, with or without
7   * modification, are permitted provided that the following conditions
8   * are met:
9   * 1. Redistributions of source code must retain the above copyright
10   *    notice, this list of conditions and the following disclaimer
11   *    in this position and unchanged.
12   * 2. Redistributions in binary form must reproduce the above copyright
13   *    notice, this list of conditions and the following disclaimer in the
14   *    documentation and/or other materials provided with the distribution.
15   *
16   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17   * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19   * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20   * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25   * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26   *
27   */
28  
29  #include <sys/param.h>
30  #include <sys/systm.h>
31  #include <sys/bus.h>
32  #include <sys/conf.h>
33  #include <sys/fcntl.h>
34  #include <sys/filio.h>
35  #include <sys/kernel.h>
36  #include <sys/kthread.h>
37  #include <sys/lock.h>
38  #include <sys/module.h>
39  #include <sys/malloc.h>
40  #include <sys/poll.h>
41  #include <sys/proc.h>
42  #include <sys/random.h>
43  #include <sys/sbuf.h>
44  #include <sys/selinfo.h>
45  #include <sys/sysctl.h>
46  #include <sys/systm.h>
47  #include <sys/uio.h>
48  #include <sys/unistd.h>
49  
50  #include <crypto/rijndael/rijndael-api-fst.h>
51  #include <crypto/sha2/sha256.h>
52  
53  #include <dev/random/hash.h>
54  #include <dev/random/randomdev.h>
55  #include <dev/random/random_harvestq.h>
56  
#define	RANDOM_UNIT	0	/* Minor unit number for /dev/random. */

/*
 * In loadable random, the core randomdev.c / random(9) routines have static
 * visibility and an alternative name to avoid conflicting with the function
 * pointers of the real names in the core kernel.  random_alg_context_init
 * installs pointers to the loadable static names into the core kernel's
 * function pointers at SI_SUB_RANDOM:SI_ORDER_SECOND.
 */
#if defined(RANDOM_LOADABLE)
/*
 * The parentheses around the names suppress any function-like macro
 * expansion, so these declare the plain static functions defined below.
 */
static int (read_random_uio)(struct uio *, bool);
static void (read_random)(void *, u_int);
static bool (is_random_seeded)(void);
#endif

/* cdevsw(9) entry points for /dev/random, defined later in this file. */
static d_read_t randomdev_read;
static d_write_t randomdev_write;
static d_poll_t randomdev_poll;
static d_ioctl_t randomdev_ioctl;
76  
/* Character device switch for /dev/random; no open/close methods needed. */
static struct cdevsw random_cdevsw = {
	.d_name = "random",
	.d_version = D_VERSION,
	.d_read = randomdev_read,
	.d_write = randomdev_write,
	.d_poll = randomdev_poll,
	.d_ioctl = randomdev_ioctl,
};

/* For use with make_dev(9)/destroy_dev(9). */
static struct cdev *random_dev;
88  
#if defined(RANDOM_LOADABLE)
/*
 * Point the core kernel's random(9) function pointers at this module's
 * static implementations.  Registered to run at
 * SI_SUB_RANDOM:SI_ORDER_SECOND via the SYSINIT below.
 */
static void
random_alg_context_init(void *dummy __unused)
{
	_read_random_uio = (read_random_uio);
	_read_random = (read_random);
	_is_random_seeded = (is_random_seeded);
}
SYSINIT(random_device, SI_SUB_RANDOM, SI_ORDER_SECOND, random_alg_context_init,
    NULL);
#endif
100  
/*
 * Select/poll queue: readers polling an unseeded device are recorded here
 * (randomdev_poll) and woken by randomdev_unblock() once seeding completes.
 */
static struct selinfo rsel;
102  
103  /*
104   * This is the read uio(9) interface for random(4).
105   */
106  /* ARGSUSED */
107  static int
randomdev_read(struct cdev * dev __unused,struct uio * uio,int flags)108  randomdev_read(struct cdev *dev __unused, struct uio *uio, int flags)
109  {
110  
111  	return ((read_random_uio)(uio, (flags & O_NONBLOCK) != 0));
112  }
113  
/*
 * If the random device is not seeded, blocks until it is seeded.
 *
 * Returns zero when the random device is seeded.
 *
 * If the 'interruptible' parameter is true, and the device is unseeded, this
 * routine may be interrupted.  If interrupted, it will return either ERESTART
 * or EINTR.
 */
#define SEEDWAIT_INTERRUPTIBLE		true
#define SEEDWAIT_UNINTERRUPTIBLE	false
static int
randomdev_wait_until_seeded(bool interruptible)
{
	int error, spamcount, slpflags;

	/* PCATCH lets the tsleep(9) below be woken by a signal. */
	slpflags = interruptible ? PCATCH : 0;

	error = 0;
	spamcount = 0;
	while (!p_random_alg_context->ra_seeded()) {
		/* keep tapping away at the pre-read until we seed/unblock. */
		p_random_alg_context->ra_pre_read();
		/* Only bother the console every 10 seconds or so */
		if (spamcount == 0)
			printf("random: %s unblock wait\n", __func__);
		/* 100 iterations x the hz/10 sleep below ~= 10 seconds. */
		spamcount = (spamcount + 1) % 100;
		/*
		 * Sleep on p_random_alg_context; randomdev_unblock() issues a
		 * wakeup() on that same channel once the device seeds.
		 */
		error = tsleep(p_random_alg_context, slpflags, "randseed",
		    hz / 10);
		if (error == ERESTART || error == EINTR) {
			KASSERT(interruptible,
			    ("unexpected wake of non-interruptible sleep"));
			break;
		}
		/* Squash tsleep timeout condition */
		if (error == EWOULDBLOCK)
			error = 0;
		KASSERT(error == 0, ("unexpected tsleep error %d", error));
	}
	return (error);
}
155  
/*
 * Core of the read(2) path for random(4): fill 'uio' with random bytes.
 *
 * Blocks (interruptibly) until the device is seeded, unless 'nonblock' is
 * true, in which case EWOULDBLOCK is returned while unseeded.  A read
 * interrupted by a signal returns success with a short transfer recorded
 * in the uio, matching normal read(2) semantics.
 */
int
(read_random_uio)(struct uio *uio, bool nonblock)
{
	/* 16 MiB takes about 0.08 s CPU time on my 2017 AMD Zen CPU */
#define SIGCHK_PERIOD (16 * 1024 * 1024)
	const size_t sigchk_period = SIGCHK_PERIOD;
	CTASSERT(SIGCHK_PERIOD % PAGE_SIZE == 0);
#undef SIGCHK_PERIOD

	uint8_t *random_buf;
	size_t total_read, read_len;
	ssize_t bufsize;
	int error;


	KASSERT(uio->uio_rw == UIO_READ, ("%s: bogus write", __func__));
	KASSERT(uio->uio_resid >= 0, ("%s: bogus negative resid", __func__));

	p_random_alg_context->ra_pre_read();
	error = 0;
	/* (Un)Blocking logic */
	if (!p_random_alg_context->ra_seeded()) {
		if (nonblock)
			error = EWOULDBLOCK;
		else
			error = randomdev_wait_until_seeded(
			    SEEDWAIT_INTERRUPTIBLE);
	}
	if (error != 0)
		return (error);

	total_read = 0;

	/* Easy to deal with the trivial 0 byte case. */
	if (__predict_false(uio->uio_resid == 0))
		return (0);

	/*
	 * If memory is plentiful, use maximally sized requests to avoid
	 * per-call algorithm overhead.  But fall back to a single page
	 * allocation if the full request isn't immediately available.
	 */
	bufsize = MIN(sigchk_period, (size_t)uio->uio_resid);
	random_buf = malloc(bufsize, M_ENTROPY, M_NOWAIT);
	if (random_buf == NULL) {
		bufsize = PAGE_SIZE;
		random_buf = malloc(bufsize, M_ENTROPY, M_WAITOK);
	}

	error = 0;
	while (uio->uio_resid > 0 && error == 0) {
		read_len = MIN((size_t)uio->uio_resid, bufsize);

		p_random_alg_context->ra_read(random_buf, read_len);

		/*
		 * uiomove() may yield the CPU before each 'read_len' bytes (up
		 * to bufsize) are copied out.
		 */
		error = uiomove(random_buf, read_len, uio);
		total_read += read_len;

		/*
		 * Poll for signals every few MBs to avoid very long
		 * uninterruptible syscalls.  (The CTASSERT above guarantees
		 * sigchk_period is a multiple of every possible bufsize.)
		 */
		if (error == 0 && uio->uio_resid != 0 &&
		    total_read % sigchk_period == 0) {
			error = tsleep_sbt(p_random_alg_context, PCATCH,
			    "randrd", SBT_1NS, 0, C_HARDCLOCK);
			/* Squash tsleep timeout condition */
			if (error == EWOULDBLOCK)
				error = 0;
		}
	}

	/*
	 * Short reads due to signal interrupt should not indicate error.
	 * Instead, the uio will reflect that the read was shorter than
	 * requested.
	 */
	if (error == ERESTART || error == EINTR)
		error = 0;

	/* zfree() scrubs the buffer so random output doesn't linger on the heap. */
	zfree(random_buf, M_ENTROPY);
	return (error);
}
243  
/*-
 * Kernel API version of read_random().  This is similar to read_random_uio(),
 * except it doesn't interface with uio(9).  It cannot be assumed that
 * random_buf is a multiple of RANDOM_BLOCKSIZE bytes.
 *
 * If the tunable 'kern.random.initial_seeding.bypass_before_seeding' is set
 * non-zero, silently fail to emit random data (matching the pre-r346250
 * behavior).  If read_random is called prior to seeding and bypassed because
 * of this tunable, the condition is reported in the read-only sysctl
 * 'kern.random.initial_seeding.read_random_bypassed_before_seeding'.
 */
void
(read_random)(void *random_buf, u_int len)
{

	KASSERT(random_buf != NULL, ("No suitable random buffer in %s", __func__));
	p_random_alg_context->ra_pre_read();

	if (len == 0)
		return;

	/* (Un)Blocking logic */
	if (__predict_false(!p_random_alg_context->ra_seeded())) {
		if (random_bypass_before_seeding) {
			/* Warn once (unless warnings are disabled), then record the bypass. */
			if (!read_random_bypassed_before_seeding) {
				if (!random_bypass_disable_warnings)
					printf("read_random: WARNING: bypassing"
					    " request for random data because "
					    "the random device is not yet "
					    "seeded and the knob "
					    "'bypass_before_seeding' was "
					    "enabled.\n");
				read_random_bypassed_before_seeding = true;
			}
			/* Avoid potentially leaking stack garbage */
			memset(random_buf, 0, len);
			return;
		}

		/* No bypass: wait forever; kernel callers cannot take a signal. */
		(void)randomdev_wait_until_seeded(SEEDWAIT_UNINTERRUPTIBLE);
	}
	p_random_alg_context->ra_read(random_buf, len);
}
287  
288  bool
289  (is_random_seeded)(void)
290  {
291  	return (p_random_alg_context->ra_seeded());
292  }
293  
/*
 * Mix 'count' caller-supplied bytes (from a write(2) to the device) into
 * the entropy processor: hash the input together with two cycle-counter
 * samples, then feed the digest in as RANDOM_CACHED harvest events.
 *
 * NOTE(review): the event/hash/entropy_data scratch buffers are static, so
 * concurrent callers would race on them — presumably writes are serialized
 * at a higher layer; confirm before relying on this from multiple threads.
 */
static __inline void
randomdev_accumulate(uint8_t *buf, u_int count)
{
	static u_int destination = 0;
	static struct harvest_event event;
	static struct randomdev_hash hash;
	static uint32_t entropy_data[RANDOM_KEYSIZE_WORDS];
	uint32_t timestamp;
	int i;

	/* Extra timing here is helpful to scrape scheduler jitter entropy */
	randomdev_hash_init(&hash);
	timestamp = (uint32_t)get_cyclecount();
	randomdev_hash_iterate(&hash, &timestamp, sizeof(timestamp));
	randomdev_hash_iterate(&hash, buf, count);
	timestamp = (uint32_t)get_cyclecount();
	randomdev_hash_iterate(&hash, &timestamp, sizeof(timestamp));
	randomdev_hash_finish(&hash, entropy_data);
	/* Carve the digest into he_entropy-sized chunks and queue each one. */
	for (i = 0; i < RANDOM_KEYSIZE_WORDS; i += sizeof(event.he_entropy)/sizeof(event.he_entropy[0])) {
		event.he_somecounter = (uint32_t)get_cyclecount();
		event.he_size = sizeof(event.he_entropy);
		event.he_source = RANDOM_CACHED;
		event.he_destination = destination++; /* Harmless cheating */
		memcpy(event.he_entropy, entropy_data + i, sizeof(event.he_entropy));
		p_random_alg_context->ra_event_processor(&event);
	}
	/* Don't leave digest material sitting in static storage. */
	explicit_bzero(&event, sizeof(event));
	explicit_bzero(entropy_data, sizeof(entropy_data));
}
323  
324  /* ARGSUSED */
325  static int
randomdev_write(struct cdev * dev __unused,struct uio * uio,int flags __unused)326  randomdev_write(struct cdev *dev __unused, struct uio *uio, int flags __unused)
327  {
328  	uint8_t *random_buf;
329  	int c, error = 0;
330  	ssize_t nbytes;
331  
332  	random_buf = malloc(PAGE_SIZE, M_ENTROPY, M_WAITOK);
333  	nbytes = uio->uio_resid;
334  	while (uio->uio_resid > 0 && error == 0) {
335  		c = MIN(uio->uio_resid, PAGE_SIZE);
336  		error = uiomove(random_buf, c, uio);
337  		if (error)
338  			break;
339  		randomdev_accumulate(random_buf, c);
340  	}
341  	if (nbytes != uio->uio_resid && (error == ERESTART || error == EINTR))
342  		/* Partial write, not error. */
343  		error = 0;
344  	free(random_buf, M_ENTROPY);
345  	return (error);
346  }
347  
348  /* ARGSUSED */
349  static int
randomdev_poll(struct cdev * dev __unused,int events,struct thread * td __unused)350  randomdev_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
351  {
352  
353  	if (events & (POLLIN | POLLRDNORM)) {
354  		if (p_random_alg_context->ra_seeded())
355  			events &= (POLLIN | POLLRDNORM);
356  		else
357  			selrecord(td, &rsel);
358  	}
359  	return (events);
360  }
361  
/* This will be called by the entropy processor when it seeds itself and becomes secure */
void
randomdev_unblock(void)
{

	/* Wake poll(2)/select(2) waiters recorded by randomdev_poll(). */
	selwakeuppri(&rsel, PUSER);
	/* Wake sleepers in randomdev_wait_until_seeded() (same wait channel). */
	wakeup(p_random_alg_context);
	printf("random: unblocking device.\n");
#ifndef RANDOM_FENESTRASX
	/* Do random(9) a favour while we are about it. */
	(void)atomic_cmpset_int(&arc4rand_iniseed_state, ARC4_ENTR_NONE, ARC4_ENTR_HAVE);
#endif
}
375  
376  /* ARGSUSED */
377  static int
randomdev_ioctl(struct cdev * dev __unused,u_long cmd,caddr_t addr __unused,int flags __unused,struct thread * td __unused)378  randomdev_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t addr __unused,
379      int flags __unused, struct thread *td __unused)
380  {
381  	int error = 0;
382  
383  	switch (cmd) {
384  		/* Really handled in upper layer */
385  	case FIOASYNC:
386  	case FIONBIO:
387  		break;
388  	default:
389  		error = ENOTTY;
390  	}
391  
392  	return (error);
393  }
394  
395  /* ARGSUSED */
396  static int
randomdev_modevent(module_t mod __unused,int type,void * data __unused)397  randomdev_modevent(module_t mod __unused, int type, void *data __unused)
398  {
399  	int error = 0;
400  
401  	switch (type) {
402  	case MOD_LOAD:
403  		printf("random: entropy device external interface\n");
404  		random_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &random_cdevsw,
405  		    RANDOM_UNIT, NULL, UID_ROOT, GID_WHEEL, 0644, "random");
406  		make_dev_alias(random_dev, "urandom"); /* compatibility */
407  		break;
408  	case MOD_UNLOAD:
409  		error = EBUSY;
410  		break;
411  	case MOD_SHUTDOWN:
412  		break;
413  	default:
414  		error = EOPNOTSUPP;
415  		break;
416  	}
417  	return (error);
418  }
419  
/* Module registration glue for the random(4) device. */
static moduledata_t randomdev_mod = {
	"random_device",
	randomdev_modevent,
	0
};

DECLARE_MODULE(random_device, randomdev_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(random_device, 1);
/* Requires the crypto primitives and the entropy harvest queue. */
MODULE_DEPEND(random_device, crypto, 1, 1, 1);
MODULE_DEPEND(random_device, random_harvestq, 1, 1, 1);
430