/*-
 * Copyright (c) 2017 Oliver Pinter
 * Copyright (c) 2000-2015 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/sbuf.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/unistd.h>

#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>

#include <dev/random/hash.h>
#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>

#define RANDOM_UNIT 0

#if defined(RANDOM_LOADABLE)
#define READ_RANDOM_UIO _read_random_uio
#define READ_RANDOM _read_random
#define IS_RANDOM_SEEDED _is_random_seeded
static int READ_RANDOM_UIO(struct uio *, bool);
static void READ_RANDOM(void *, u_int);
static bool IS_RANDOM_SEEDED(void);
#else
#define READ_RANDOM_UIO read_random_uio
#define READ_RANDOM read_random
#define IS_RANDOM_SEEDED is_random_seeded
#endif

static d_read_t randomdev_read;
static d_write_t randomdev_write;
static d_poll_t randomdev_poll;
static d_ioctl_t randomdev_ioctl;

static struct cdevsw random_cdevsw = {
        .d_name = "random",
        .d_version = D_VERSION,
        .d_read = randomdev_read,
        .d_write = randomdev_write,
        .d_poll = randomdev_poll,
        .d_ioctl = randomdev_ioctl,
};
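
/*
 * Illustrative sketch (not part of this driver): userland reaches the
 * d_read/d_write/d_poll methods above through /dev/random, or through the
 * /dev/urandom alias created in randomdev_modevent() below, e.g.:
 *
 *      int fd = open("/dev/random", O_RDONLY);
 *      char buf[32];
 *      ssize_t n = read(fd, buf, sizeof(buf));
 *
 * The read blocks until the device is seeded (or fails with EWOULDBLOCK on a
 * non-blocking descriptor); a caller can instead wait for seeding with
 * poll(2)/select(2), since randomdev_poll() only reports POLLIN/POLLRDNORM
 * once the algorithm is seeded.
 */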
/* For use with make_dev(9)/destroy_dev(9). */
static struct cdev *random_dev;

static void
random_alg_context_ra_init_alg(void *data)
{

        p_random_alg_context = &random_alg_context;
        p_random_alg_context->ra_init_alg(data);
#if defined(RANDOM_LOADABLE)
        random_infra_init(READ_RANDOM_UIO, READ_RANDOM, IS_RANDOM_SEEDED);
#endif
}

static void
random_alg_context_ra_deinit_alg(void *data)
{

#if defined(RANDOM_LOADABLE)
        random_infra_uninit();
#endif
        p_random_alg_context->ra_deinit_alg(data);
        p_random_alg_context = NULL;
}

SYSINIT(random_device, SI_SUB_RANDOM, SI_ORDER_THIRD, random_alg_context_ra_init_alg, NULL);
SYSUNINIT(random_device, SI_SUB_RANDOM, SI_ORDER_THIRD, random_alg_context_ra_deinit_alg, NULL);

static struct selinfo rsel;

/*
 * This is the read uio(9) interface for random(4).
 */
/* ARGSUSED */
static int
randomdev_read(struct cdev *dev __unused, struct uio *uio, int flags)
{

        return (READ_RANDOM_UIO(uio, (flags & O_NONBLOCK) != 0));
}

/*
 * If the random device is not seeded, blocks until it is seeded.
 *
 * Returns zero when the random device is seeded.
 *
 * If the 'interruptible' parameter is true, and the device is unseeded, this
 * routine may be interrupted.  If interrupted, it will return either ERESTART
 * or EINTR.
 */
#define SEEDWAIT_INTERRUPTIBLE         true
#define SEEDWAIT_UNINTERRUPTIBLE       false
static int
randomdev_wait_until_seeded(bool interruptible)
{
        int error, spamcount, slpflags;

        slpflags = interruptible ? PCATCH : 0;

        error = 0;
        spamcount = 0;
        while (!p_random_alg_context->ra_seeded()) {
                /* keep tapping away at the pre-read until we seed/unblock. */
                p_random_alg_context->ra_pre_read();
                /* Only bother the console every 10 seconds or so */
                if (spamcount == 0)
                        printf("random: %s unblock wait\n", __func__);
                spamcount = (spamcount + 1) % 100;
                error = tsleep(&random_alg_context, slpflags, "randseed",
                    hz / 10);
                if (error == ERESTART || error == EINTR) {
                        KASSERT(interruptible,
                            ("unexpected wake of non-interruptible sleep"));
                        break;
                }
                /* Squash tsleep timeout condition */
                if (error == EWOULDBLOCK)
                        error = 0;
                KASSERT(error == 0, ("unexpected tsleep error %d", error));
        }
        return (error);
}

int
READ_RANDOM_UIO(struct uio *uio, bool nonblock)
{
        /* 16 MiB takes about 0.08 s CPU time on my 2017 AMD Zen CPU */
#define SIGCHK_PERIOD (16 * 1024 * 1024)
        const size_t sigchk_period = SIGCHK_PERIOD;
        CTASSERT(SIGCHK_PERIOD % PAGE_SIZE == 0);
#undef SIGCHK_PERIOD

        uint8_t *random_buf;
        size_t total_read, read_len;
        ssize_t bufsize;
        int error;

        KASSERT(uio->uio_rw == UIO_READ, ("%s: bogus write", __func__));
        KASSERT(uio->uio_resid >= 0, ("%s: bogus negative resid", __func__));

        p_random_alg_context->ra_pre_read();
        error = 0;
        /* (Un)Blocking logic */
        if (!p_random_alg_context->ra_seeded()) {
                if (nonblock)
                        error = EWOULDBLOCK;
                else
                        error = randomdev_wait_until_seeded(
                            SEEDWAIT_INTERRUPTIBLE);
        }
        if (error != 0)
                return (error);

        read_rate_increment(howmany(uio->uio_resid + 1, sizeof(uint32_t)));
        total_read = 0;

        /* Easy to deal with the trivial 0 byte case. */
        if (__predict_false(uio->uio_resid == 0))
                return (0);

        /*
         * If memory is plentiful, use maximally sized requests to avoid
         * per-call algorithm overhead.  But fall back to a single page
         * allocation if the full request isn't immediately available.
         */
        bufsize = MIN(sigchk_period, (size_t)uio->uio_resid);
        random_buf = malloc(bufsize, M_ENTROPY, M_NOWAIT);
        if (random_buf == NULL) {
                bufsize = PAGE_SIZE;
                random_buf = malloc(bufsize, M_ENTROPY, M_WAITOK);
        }

        error = 0;
        while (uio->uio_resid > 0 && error == 0) {
                read_len = MIN((size_t)uio->uio_resid, bufsize);

                p_random_alg_context->ra_read(random_buf, read_len);

                /*
                 * uiomove() may yield the CPU before each 'read_len' bytes
                 * (up to bufsize) are copied out.
                 */
                error = uiomove(random_buf, read_len, uio);
                total_read += read_len;

                /*
                 * Poll for signals every few MBs to avoid very long
                 * uninterruptible syscalls.
                 */
                if (error == 0 && uio->uio_resid != 0 &&
                    total_read % sigchk_period == 0) {
                        error = tsleep_sbt(&random_alg_context, PCATCH,
                            "randrd", SBT_1NS, 0, C_HARDCLOCK);
                        /* Squash tsleep timeout condition */
                        if (error == EWOULDBLOCK)
                                error = 0;
                }
        }

        /*
         * Short reads due to signal interrupt should not indicate error.
         * Instead, the uio will reflect that the read was shorter than
         * requested.
         */
        if (error == ERESTART || error == EINTR)
                error = 0;

        explicit_bzero(random_buf, bufsize);
        free(random_buf, M_ENTROPY);
        return (error);
}
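
/*
 * Illustrative note (not part of this file): because the loop above squashes
 * ERESTART/EINTR into a short read, a large transfer such as
 *
 *      dd if=/dev/random of=/dev/null bs=64m count=1
 *
 * that is interrupted at one of the 16 MiB (sigchk_period) signal-check
 * points returns the bytes already copied rather than failing with EINTR.
 */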
/*-
 * Kernel API version of read_random().  This is similar to read_random_uio(),
 * except it doesn't interface with uio(9).  It cannot be assumed that
 * random_buf is a multiple of RANDOM_BLOCKSIZE bytes.
 *
 * If the tunable 'kern.random.initial_seeding.bypass_before_seeding' is set
 * non-zero, silently fail to emit random data (matching the pre-r346250
 * behavior).  If read_random is called prior to seeding and bypassed because
 * of this tunable, the condition is reported in the read-only sysctl
 * 'kern.random.initial_seeding.read_random_bypassed_before_seeding'.
 */
void
READ_RANDOM(void *random_buf, u_int len)
{

        KASSERT(random_buf != NULL, ("No suitable random buffer in %s", __func__));
        p_random_alg_context->ra_pre_read();

        if (len == 0)
                return;

        /* (Un)Blocking logic */
        if (__predict_false(!p_random_alg_context->ra_seeded())) {
                if (random_bypass_before_seeding) {
                        if (!read_random_bypassed_before_seeding) {
                                if (!random_bypass_disable_warnings)
                                        printf("read_random: WARNING: bypassing"
                                            " request for random data because "
                                            "the random device is not yet "
                                            "seeded and the knob "
                                            "'bypass_before_seeding' was "
                                            "enabled.\n");
                                read_random_bypassed_before_seeding = true;
                        }
                        /* Avoid potentially leaking stack garbage */
                        memset(random_buf, 0, len);
                        return;
                }

                (void)randomdev_wait_until_seeded(SEEDWAIT_UNINTERRUPTIBLE);
        }
        read_rate_increment(roundup2(len, sizeof(uint32_t)));
        p_random_alg_context->ra_read(random_buf, len);
}

bool
IS_RANDOM_SEEDED(void)
{
        return (p_random_alg_context->ra_seeded());
}
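
/*
 * Illustrative in-kernel usage sketch (hypothetical caller, not part of this
 * file): most kernel consumers simply do
 *
 *      uint64_t nonce;
 *
 *      read_random(&nonce, sizeof(nonce));
 *
 * and, where the zero-filled bypass output above would be unacceptable,
 * check is_random_seeded() before relying on the result.
 */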
static __inline void
randomdev_accumulate(uint8_t *buf, u_int count)
{
        static u_int destination = 0;
        static struct harvest_event event;
        static struct randomdev_hash hash;
        static uint32_t entropy_data[RANDOM_KEYSIZE_WORDS];
        uint32_t timestamp;
        int i;

        /* Extra timing here is helpful to scrape scheduler jitter entropy */
        randomdev_hash_init(&hash);
        timestamp = (uint32_t)get_cyclecount();
        randomdev_hash_iterate(&hash, &timestamp, sizeof(timestamp));
        randomdev_hash_iterate(&hash, buf, count);
        timestamp = (uint32_t)get_cyclecount();
        randomdev_hash_iterate(&hash, &timestamp, sizeof(timestamp));
        randomdev_hash_finish(&hash, entropy_data);
        for (i = 0; i < RANDOM_KEYSIZE_WORDS; i += sizeof(event.he_entropy)/sizeof(event.he_entropy[0])) {
                event.he_somecounter = (uint32_t)get_cyclecount();
                event.he_size = sizeof(event.he_entropy);
                event.he_source = RANDOM_CACHED;
                event.he_destination = destination++; /* Harmless cheating */
                memcpy(event.he_entropy, entropy_data + i, sizeof(event.he_entropy));
                p_random_alg_context->ra_event_processor(&event);
        }
        explicit_bzero(&event, sizeof(event));
        explicit_bzero(entropy_data, sizeof(entropy_data));
}

/* ARGSUSED */
static int
randomdev_write(struct cdev *dev __unused, struct uio *uio, int flags __unused)
{
        uint8_t *random_buf;
        int c, error = 0;
        ssize_t nbytes;

        random_buf = malloc(PAGE_SIZE, M_ENTROPY, M_WAITOK);
        nbytes = uio->uio_resid;
        while (uio->uio_resid > 0 && error == 0) {
                c = MIN(uio->uio_resid, PAGE_SIZE);
                error = uiomove(random_buf, c, uio);
                if (error)
                        break;
                randomdev_accumulate(random_buf, c);
                tsleep(&random_alg_context, 0, "randwr", hz/10);
        }
        if (nbytes != uio->uio_resid && (error == ERESTART || error == EINTR))
                /* Partial write, not error. */
                error = 0;
        free(random_buf, M_ENTROPY);
        return (error);
}

/* ARGSUSED */
static int
randomdev_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
{

        if (events & (POLLIN | POLLRDNORM)) {
                if (p_random_alg_context->ra_seeded())
                        events &= (POLLIN | POLLRDNORM);
                else
                        selrecord(td, &rsel);
        }
        return (events);
}

/* This will be called by the entropy processor when it seeds itself and becomes secure */
void
randomdev_unblock(void)
{

        selwakeuppri(&rsel, PUSER);
        wakeup(&random_alg_context);
        printf("random: unblocking device.\n");
        /* Do random(9) a favour while we are about it. */
        (void)atomic_cmpset_int(&arc4rand_iniseed_state, ARC4_ENTR_NONE, ARC4_ENTR_HAVE);
}

/* ARGSUSED */
static int
randomdev_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t addr __unused,
    int flags __unused, struct thread *td __unused)
{
        int error = 0;

        switch (cmd) {
                /* Really handled in upper layer */
        case FIOASYNC:
        case FIONBIO:
                break;
        default:
                error = ENOTTY;
        }

        return (error);
}

void
random_source_register(struct random_source *rsource)
{
        struct random_sources *rrs;

        KASSERT(rsource != NULL, ("invalid input to %s", __func__));

        rrs = malloc(sizeof(*rrs), M_ENTROPY, M_WAITOK);
        rrs->rrs_source = rsource;

        random_harvest_register_source(rsource->rs_source);

        printf("random: registering fast source %s\n", rsource->rs_ident);
        LIST_INSERT_HEAD(&source_list, rrs, rrs_entries);
}

void
random_source_deregister(struct random_source *rsource)
{
        struct random_sources *rrs = NULL;

        KASSERT(rsource != NULL, ("invalid input to %s", __func__));

        random_harvest_deregister_source(rsource->rs_source);

        LIST_FOREACH(rrs, &source_list, rrs_entries)
                if (rrs->rrs_source == rsource) {
                        LIST_REMOVE(rrs, rrs_entries);
                        break;
                }
        if (rrs != NULL)
                free(rrs, M_ENTROPY);
}
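
/*
 * Illustrative registration sketch (hypothetical "mysrc" driver, not part of
 * this file): a fast entropy source fills in a struct random_source with its
 * name, its RANDOM_PURE_* identity and a read callback, then hands it to the
 * functions above, typically from its attach/detach routines:
 *
 *      static struct random_source random_mysrc = {
 *              .rs_ident = "Example hardware RNG",
 *              .rs_source = RANDOM_PURE_RDRAND,
 *              .rs_read = mysrc_read,
 *      };
 *
 *      random_source_register(&random_mysrc);
 *      ...
 *      random_source_deregister(&random_mysrc);
 */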
static int
random_source_handler(SYSCTL_HANDLER_ARGS)
{
        struct random_sources *rrs;
        struct sbuf sbuf;
        int error, count;

        sbuf_new_for_sysctl(&sbuf, NULL, 64, req);
        count = 0;
        LIST_FOREACH(rrs, &source_list, rrs_entries) {
                sbuf_cat(&sbuf, (count++ ? ",'" : "'"));
                sbuf_cat(&sbuf, rrs->rrs_source->rs_ident);
                sbuf_cat(&sbuf, "'");
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
SYSCTL_PROC(_kern_random, OID_AUTO, random_sources, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    NULL, 0, random_source_handler, "A",
    "List of active fast entropy sources.");

/* ARGSUSED */
static int
randomdev_modevent(module_t mod __unused, int type, void *data __unused)
{
        int error = 0;

        switch (type) {
        case MOD_LOAD:
                printf("random: entropy device external interface\n");
                random_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &random_cdevsw,
                    RANDOM_UNIT, NULL, UID_ROOT, GID_WHEEL, 0644, "random");
                make_dev_alias(random_dev, "urandom"); /* compatibility */
                break;
        case MOD_UNLOAD:
                destroy_dev(random_dev);
                break;
        case MOD_SHUTDOWN:
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return (error);
}

static moduledata_t randomdev_mod = {
        "random_device",
        randomdev_modevent,
        0
};

DECLARE_MODULE(random_device, randomdev_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(random_device, 1);
MODULE_DEPEND(random_device, crypto, 1, 1, 1);
MODULE_DEPEND(random_device, random_harvestq, 1, 1, 1);