/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_add_pseudo_entropy(), random_get_pseudo_bytes()
 * and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy requests for high quality random numbers (/dev/random).
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses the
 * random bits in the cache as a seed. We create one pseudo-random generator
 * (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_mag.rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *	   It may be locked while a rmp->rm_mag.rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/ioctladmin.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#include <rng/fips_random.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per prng key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;
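/*
 * Semantics of the extraction modes (see rnd_get_bytes() below):
 *
 *	NONBLOCK_EXTRACT	Fail with EAGAIN when not enough random
 *				bytes can be gathered without waiting.
 *	BLOCKING_EXTRACT	Sleep until the pool holds at least
 *				MINEXTRACTBYTES bytes.
 *	ALWAYS_EXTRACT		Never block and never fail; if the pool
 *				runs dry, keep stretching whatever pool
 *				bytes are present. Used only to seed the
 *				/dev/urandom generators, which run an
 *				additional generation algorithm on top.
 */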
/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error code in the /dev/urandom case and we can get an error using k-API
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20
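/*
 * Illustrative sketch (not part of the driver): the indirection above
 * lets the hash algorithm be swapped by editing only these definitions,
 * as long as HASHSIZE matches the new digest length.
 *
 *	HASH_CTX ctx;
 *	uint8_t digest[HASHSIZE];
 *
 *	HashInit(&ctx);
 *	HashUpdate(&ctx, buf, buflen);
 *	HashFinal(digest, &ctx);
 */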
/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;		/* Number of bytes in the cache */

static kmutex_t rndpool_lock;	/* protects r/w accesses to the cache, */
				/* and the global variables */
static kcondvar_t rndpool_read_cv; /* serializes poll/read syscalls */
static int num_waiters;		/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
/* LINTED E_STATIC_UNUSED */
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;
static boolean_t rngprov_task_idle = B_TRUE;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines(void);

void
kcf_rnd_init(void)
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable time values:
	 * the high-resolution time since boot, and the current
	 * time of day. This is used only to make the timeout value
	 * in the timer unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}
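/*
 * rngprov_seed() is the backend for the exported random_add_entropy()
 * and random_add_pseudo_entropy() entry points at the bottom of this
 * file: random_add_pseudo_entropy() passes flags of 0, while
 * random_add_entropy() passes CRYPTO_SEED_NOW so that the provider
 * mixes the bytes in immediately.
 */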
/*
 * This routine is called for blocking reads.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
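/*
 * Callback for the asynchronous (hardware provider) requests submitted
 * by rngprov_getbytes_nblk() below. On success, the freshly generated
 * bytes are mixed into the cache; in all cases the temporary buffer is
 * scrubbed before it is freed, so that no random material lingers in
 * freed kernel memory.
 */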
static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}

/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT modes.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, total_bytes;
	size_t blen;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we cannot
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_TRUE);
	rngprov_task_idle = B_TRUE;
}
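/*
 * rngprov_task() above runs from system_taskq; it is dispatched by
 * rnd_handler() whenever the cache needs replenishing. Passing B_TRUE
 * to rngprov_getbytes() makes the collected bytes go into the cache
 * (via rndc_addbytes()) rather than back to a caller, and makes every
 * registered provider get polled instead of just the first one.
 */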
/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how)
{
	size_t bytes;
	int got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we cannot block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}
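/*
 * Note that rnd_get_bytes() is entered with rndpool_lock held and drops
 * it on every path before returning. That is why kcf_rnd_get_bytes()
 * above can return an error without a matching mutex_exit(), and why
 * rnd_alloc_magazines() below re-enters the lock before each successive
 * rnd_get_bytes() call.
 */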
/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	RND_CPU_CACHE_SIZE*6
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	sizeof (rndmag_t))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[HASHSIZE/BYTES_IN_WORD];	/* FIPS XKEY */
	uint32_t	rm_seed[HASHSIZE/BYTES_IN_WORD]; /* seed for rekey */
	uint32_t	rm_previous[HASHSIZE/BYTES_IN_WORD]; /* prev random */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_pad_t;
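/*
 * The padding arithmetic above only works while sizeof (rndmag_t) stays
 * at or below RND_CPU_PAD_SIZE (six cache lines); otherwise RND_CPU_PAD
 * underflows. A compile-time guard along these lines (a sketch, assuming
 * CTASSERT from <sys/debug.h> is usable in this context) would catch
 * growth of the struct:
 *
 *	CTASSERT(sizeof (rndmag_t) <= RND_CPU_PAD_SIZE);
 *	CTASSERT(sizeof (rndmag_pad_t) % RND_CPU_CACHE_SIZE == 0);
 */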
/*
 * Generate random bytes for /dev/urandom by applying the
 * FIPS 186-2 algorithm with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len, size;
	int nblock;
	uint32_t oblocks;
	uint32_t tempout[HASHSIZE/BYTES_IN_WORD];
	uint32_t seed[HASHSIZE/BYTES_IN_WORD];
	int i;
	hrtime_t timestamp;
	uint8_t *src, *dst;

	ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));

	/* Nothing was asked for */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_mag.rm_oblocks += nblock;
	oblocks = rmp->rm_mag.rm_oblocks;

	do {
		if (oblocks >= rmp->rm_mag.rm_olimit) {

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_mag.rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_mag.rm_olimit +=
					    rmp->rm_mag.rm_ofuzz;
					rmp->rm_mag.rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
			    HMAC_KEYSIZE, ALWAYS_EXTRACT);

			rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS/4;
			oblocks = 0;
			rmp->rm_mag.rm_oblocks = nblock;
		}
punt:
		timestamp = gethrtime();

		src = (uint8_t *)&timestamp;
		dst = (uint8_t *)rmp->rm_mag.rm_seed;

		for (i = 0; i < HASHSIZE; i++) {
			dst[i] ^= src[i % sizeof (timestamp)];
		}

		bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);

		fips_random_inner(rmp->rm_mag.rm_key, tempout, seed);

		size = min(bytes, HASHSIZE);

		/*
		 * FIPS 140-2: Continuous RNG test - each generation
		 * of an n-bit block shall be compared with the previously
		 * generated block. Test shall fail if any two compared
		 * n-bit blocks are equal.
		 */
		for (i = 0; i < HASHSIZE/BYTES_IN_WORD; i++) {
			if (tempout[i] != rmp->rm_mag.rm_previous[i])
				break;
		}
		if (i == HASHSIZE/BYTES_IN_WORD) {
			cmn_err(CE_WARN, "kcf_random: 160-bit block of "
			    "random bytes is the same as the previous one.");
			/*
			 * Discard the random bytes and return an error.
			 * Scrub the buffers and release rm_lock to honor
			 * this routine's locking contract.
			 */
			bzero(seed, HASHSIZE);
			bzero(tempout, HASHSIZE);
			mutex_exit(&rmp->rm_mag.rm_lock);
			return (EIO);
		}

		bcopy(tempout, rmp->rm_mag.rm_previous, HASHSIZE);

		bcopy(tempout, ptr, size);
		ptr += size;
		bytes -= size;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}

/*
 * Per-CPU Random magazines.
 */
static rndmag_pad_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t	rndmag_threshold = 2560;
size_t	rndbuf_len = 5120;
size_t	rndmag_size = 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
	}
}
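/*
 * The for (;;) loop above rarely iterates more than twice: the refill
 * call drops rm_lock (rnd_generate_pseudo_bytes() always releases it),
 * so by the time we loop back the thread may have migrated, and
 * CPU->cpu_seqid is re-read to pick up the current CPU's magazine. Any
 * bytes another thread consumed in that window simply trigger another
 * refill pass.
 */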
/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes above on the first call for
 * each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines(void)
{
	rndmag_pad_t *rmp;
	int i;
	uint8_t discard_buf[HASHSIZE];

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus,
	    KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_mag.rm_buffer = buf;
		rmp->rm_mag.rm_eptr = buf + rndbuf_len;
		rmp->rm_mag.rm_rptr = buf + rndbuf_len;
		rmp->rm_mag.rm_oblocks = 1;

		mutex_enter(&rndpool_lock);
		/*
		 * FIPS 140-2: the first n-bit (n > 15) block generated
		 * after power-up, initialization, or reset shall not
		 * be used, but shall be saved for comparison.
		 */
		(void) rnd_get_bytes(discard_buf,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		bcopy(discard_buf, rmp->rm_mag.rm_previous,
		    HMAC_KEYSIZE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
	}
}

void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id)
		rngmech_type = crypto_mech2id(SUN_RANDOM);

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading a byte from the buffer here, not consuming
	 * any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}
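/*
 * Worked example of the arithmetic above: if rndpool[findex] is 0xFF,
 * ut = 500000 + (0xFF << 12) = 500000 + 0xFF000 = 1544480 microseconds,
 * so with TIMEOUT_INTERVAL == 5 the handler fires roughly 7.7 seconds
 * later; a zero byte gives the 0.5 second minimum, i.e. 2.5 seconds.
 */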
/*
 * Called from the driver for a poll on /dev/random
 * . POLLOUT always succeeds.
 * . POLLIN and POLLRDNORM will block until a
 *   minimum amount of entropy is available.
 *
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	*reventsp = events & POLLOUT;

	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * Sampling of rnbyte_cnt is an atomic
		 * operation. Hence we do not need any locking.
		 */
		if (rnbyte_cnt >= MINEXTRACTBYTES)
			*reventsp |= (events & (POLLIN | POLLRDNORM));
	}

	if (*reventsp == 0 && !anyyet)
		*phpp = &rnd_pollhead;
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		/*
		 * Note: len has no relationship with how many bytes
		 * a poll thread needs.
		 */
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	/*
	 * Only one thread gets to set rngprov_task_idle at a given point
	 * of time and the order of the writes is defined. Also, it is OK
	 * if we read an older value of it and skip the dispatch once
	 * since we will get the correct value during the next time here.
	 * So, no locking is needed here.
	 */
	if (len > 0 && rngprov_task_idle) {
		rngprov_task_idle = B_FALSE;

		/*
		 * It is OK if taskq_dispatch fails here. We will retry
		 * the next time around. Meanwhile, a thread doing a
		 * read() will go to the provider directly, if the
		 * cache becomes empty.
		 */
		if (taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP | TQ_NOQUEUE) == 0) {
			rngprov_task_idle = B_TRUE;
		}
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. It is acceptable
	 * since the blocking will eventually end, after the timeout
	 * has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately.
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
}

/*
 * The two functions below are identical to random_get_pseudo_bytes()
 * and random_get_bytes() above, except that they are called by consumers
 * that require FIPS 140-2 validated operation. They wait until the FIPS
 * boundary can be verified.
 */

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes_fips140(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	mutex_enter(&fips140_mode_lock);
	while (global_fips140_mode < FIPS140_MODE_ENABLED) {
		cv_wait(&cv_fips140, &fips140_mode_lock);
	}
	mutex_exit(&fips140_mode_lock);

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes_fips140(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	mutex_enter(&fips140_mode_lock);
	while (global_fips140_mode < FIPS140_MODE_ENABLED) {
		cv_wait(&cv_fips140, &fips140_mode_lock);
	}
	mutex_exit(&fips140_mode_lock);

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
}
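/*
 * Illustrative use of the exported entry points above (a sketch, not
 * code from this driver). random_get_bytes() can fail with EAGAIN when
 * entropy is short, so a caller that merely needs unpredictable (not
 * entropy-backed) bytes can fall back to random_get_pseudo_bytes(),
 * which always succeeds:
 *
 *	uint8_t key[32];
 *
 *	if (random_get_bytes(key, sizeof (key)) != 0)
 *		(void) random_get_pseudo_bytes(key, sizeof (key));
 */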