/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_get_pseudo_bytes() and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy high-quality randomness (/dev/random) requests.
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_mag.rm_lock protects the per-CPU pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_mag.rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#include <rng/fips_random.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per PRNG key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1-based. We use the
 * SHA1 routines directly instead of going through the kCF API because we
 * cannot return an error code in the /dev/urandom case, and the kCF API
 * can return an error if a mechanism is disabled.
 */
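/*
 * A minimal usage sketch of the generic hash macros defined below
 * (illustrative only, not part of the driver; "data" and "len" are
 * hypothetical):
 *
 *	HASH_CTX ctx;
 *	uint8_t digest[HASHSIZE];
 *
 *	HashInit(&ctx);
 *	HashUpdate(&ctx, data, len);
 *	HashFinal(digest, &ctx);
 */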
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20

/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;		/* Number of bytes in the cache */

static kmutex_t rndpool_lock;	/* protects r/w accesses to the cache, */
				/* and the global variables */
static kcondvar_t rndpool_read_cv; /* serializes poll/read syscalls */
static int num_waiters;		/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines(void);

void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable times: the
	 * high-resolution time since boot, and the current time of day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}

/* Boot-time tunable for experimentation. */
int kcf_limit_hwrng = 1;
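/*
 * Illustrative only: random_add_entropy(), defined at the bottom of this
 * file, funnels caller-supplied bytes into rngprov_seed() above with the
 * CRYPTO_SEED_NOW flag. A hypothetical in-kernel caller:
 *
 *	hrtime_t t = gethrtime();
 *
 *	(void) random_add_entropy((uint8_t *)&t, sizeof (t), 0);
 */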
/*
 * This routine is called for blocking reads.
 *
 * The argument from_user_api indicates whether the caller is
 * from userland, coming via the /dev/random driver.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers,
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry with another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t from_user_api,
    boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt++;
		/*
		 * Typically a hardware RNG is a multi-purpose
		 * crypto card and hence we do not want to overload the card
		 * just for random numbers. The following check is to prevent
		 * a user process from hogging the hardware RNG. Note that we
		 * still use the hardware RNG from the periodically run
		 * taskq thread.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && from_user_api &&
		    kcf_limit_hwrng == 1) {
			ASSERT(is_taskq_thr == B_FALSE);
			goto try_next;
		}

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}

/*
 * Cycle through all the providers, submitting a request to each provider
 * to generate random numbers. This is called for the modes NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len, boolean_t from_user_api)
{
	int rv, blen, total_bytes;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/* See comments in rngprov_getbytes() */
			if (from_user_api && kcf_limit_hwrng == 1)
				goto try_next;

			/*
			 * We have to allocate a buffer here as we cannot
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_FALSE, B_TRUE);
}
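/*
 * Summary of the extract_type_t semantics implemented by rnd_get_bytes()
 * below (descriptive addition, not in the original):
 *
 *	BLOCKING_EXTRACT	blocking /dev/random read; cv_wait_sig()s
 *				until at least MINEXTRACTBYTES accumulate.
 *	NONBLOCK_EXTRACT	non-blocking read; returns EAGAIN when the
 *				pool and the providers cannot satisfy the
 *				request.
 *	ALWAYS_EXTRACT		internal reseeding; never blocks or fails,
 *				recycling pool bytes if it must.
 */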
/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing the caller to do optimistic
 * locking; releases the lock before returning).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how,
    boolean_t from_user_api)
{
	int bytes;
	size_t got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, from_user_api,
		    B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len,
		    from_user_api)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we cannot block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock,
    boolean_t from_user_api)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how, from_user_api)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	(RND_CPU_CACHE_SIZE * 6)
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
				sizeof (rndmag_t))
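/*
 * A compile-time check one could add after the rndmag_pad_t definition
 * below (an assumption, not in the original): verify that the padded
 * per-CPU structure really is a multiple of the cache line size, e.g.
 *
 *	CTASSERT(sizeof (rndmag_pad_t) % RND_CPU_CACHE_SIZE == 0);
 */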
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[SHA1WORDS];	/* FIPS XKEY */
	uint32_t	rm_seed[SHA1WORDS];	/* seed for rekey */
	uint32_t	rm_previous[SHA1WORDS];	/* previous random bytes */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_pad_t;

/*
 * Generate random bytes for /dev/urandom by applying the
 * FIPS 186-2 algorithm with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases the lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len;
	int nblock, size;
	uint32_t oblocks;
	uint32_t tempout[SHA1WORDS];
	uint32_t seed[SHA1WORDS];
	int i;
	hrtime_t timestamp;
	uint8_t *src, *dst;

	ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));

	/* Nothing is being asked for */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_mag.rm_oblocks += nblock;
	oblocks = rmp->rm_mag.rm_oblocks;

	do {
		if (oblocks >= rmp->rm_mag.rm_olimit) {

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without a rekey.
			 */
			if (rmp->rm_mag.rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_mag.rm_olimit +=
					    rmp->rm_mag.rm_ofuzz;
					rmp->rm_mag.rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
			    HMAC_KEYSIZE, ALWAYS_EXTRACT, B_FALSE);

			rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS / 2;
			rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS / 4;
			oblocks = 0;
			rmp->rm_mag.rm_oblocks = nblock;
		}
punt:
		timestamp = gethrtime();

		src = (uint8_t *)&timestamp;
		dst = (uint8_t *)rmp->rm_mag.rm_seed;

		for (i = 0; i < HASHSIZE; i++) {
			dst[i] ^= src[i % sizeof (timestamp)];
		}

		bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);

		fips_random_inner(rmp->rm_mag.rm_key, tempout,
		    seed);

		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
		} else {
			size = min(bytes, HASHSIZE);
		}

		/*
		 * FIPS 140-2: Continuous RNG test - each generation
		 * of an n-bit block shall be compared with the previously
		 * generated block. Test shall fail if any two compared
		 * n-bit blocks are equal.
		 */
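		/*
		 * Descriptive note (added): assuming BYTES_IN_WORD is the
		 * 4-byte word size from fips_random.h, a full block compares
		 * size/BYTES_IN_WORD == 5 words (160 bits); a short final
		 * block compares proportionally fewer words.
		 */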
		for (i = 0; i < size / BYTES_IN_WORD; i++) {
			if (tempout[i] != rmp->rm_mag.rm_previous[i])
				break;
		}
		if (i == size / BYTES_IN_WORD)
			cmn_err(CE_WARN, "kcf_random: The 160-bit block of "
			    "random bytes is the same as the previous "
			    "one.\n");

		bcopy(tempout, rmp->rm_mag.rm_previous,
		    HASHSIZE);

		bcopy(tempout, ptr, size);
		ptr += size;
		bytes -= size;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}

/*
 * Per-CPU random magazines.
 */
static rndmag_pad_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t rndmag_threshold = 2560;
size_t rndbuf_len = 5120;
size_t rndmag_size = 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass the buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
	}
}
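/*
 * Worked example of the defaults above (illustrative): rndbuf_len is
 * already a multiple of HASHSIZE (5120 == 256 * 20), so in
 * rnd_alloc_magazines() below, rndmag_size is raised from 1280 to 5120
 * and each of the random_max_ncpus CPUs gets its own 5120-byte slice of
 * rndbuf; on a 64-CPU system, rndmag_total works out to 320 KB.
 */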
/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in rnd_get_pseudo_bytes above on the first call for each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines(void)
{
	rndmag_pad_t *rmp;
	int i;
	uint8_t discard_buf[HASHSIZE];

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus,
	    KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_mag.rm_buffer = buf;
		rmp->rm_mag.rm_eptr = buf + rndbuf_len;
		rmp->rm_mag.rm_rptr = buf + rndbuf_len;
		rmp->rm_mag.rm_oblocks = 1;

		mutex_enter(&rndpool_lock);
		/*
		 * FIPS 140-2: the first n-bit (n > 15) block generated
		 * after power-up, initialization, or reset shall not
		 * be used, but shall be saved for comparison.
		 */
		(void) rnd_get_bytes(discard_buf,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT, B_FALSE);
		bcopy(discard_buf, rmp->rm_mag.rm_previous,
		    HMAC_KEYSIZE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT, B_FALSE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT, B_FALSE);
	}
}

void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id)
		rngmech_type = crypto_mech2id(SUN_RANDOM);

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading the first 32 bits from the buffer here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}
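/*
 * Worked example for the computation above (illustrative): if
 * rndpool[findex] == 0x80, then ut = 500000 + (0x80 << 12) = 1024288
 * microseconds, and the timeout fires after TIMEOUT_INTERVAL * ut,
 * i.e. roughly 5.1 seconds.
 */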
/*
 * &rnd_pollhead is passed in *phpp in order to indicate that the calling
 * thread will block. When enough random bytes are available, later, the
 * timeout handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(int anyyet, short *reventsp, struct pollhead **phpp)
{
	/*
	 * Sampling of rnbyte_cnt is an atomic
	 * operation. Hence we do not need any locking.
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES) {
		*reventsp |= (POLLIN | POLLRDNORM);
	} else {
		*reventsp = 0;
		if (!anyyet)
			*phpp = &rnd_pollhead;
	}
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	if (len > 0) {
		(void) taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP);
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. This is
	 * acceptable, since the blocking will eventually end after the
	 * timeout has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately.
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}
/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE, B_FALSE));
}
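/*
 * A minimal consumer sketch for the two generator entry points above
 * (illustrative only; the buffer and fallback policy are hypothetical):
 *
 *	uint8_t key[32];
 *
 *	if (random_get_bytes(key, sizeof (key)) != 0)
 *		(void) random_get_pseudo_bytes(key, sizeof (key));
 */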