/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_add_pseudo_entropy(), random_get_pseudo_bytes()
 * and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy requests for high-quality random numbers (/dev/random).
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_mag.rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_mag.rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#include <rng/fips_random.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes; must be a */
					/* power of 2 (index arithmetic */
					/* masks with RNDPOOLSIZE - 1) */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per prng key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error code in the /dev/urandom case, and we can get an error using k-API
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20
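/*
 * For illustration, a one-shot digest through the generic macros above
 * would look like the sketch below (hash_block is a hypothetical helper,
 * not part of this file):
 *
 *	static void
 *	hash_block(uint8_t digest[HASHSIZE], const uint8_t *p, size_t s)
 *	{
 *		HASH_CTX ctx;
 *
 *		HashInit(&ctx);
 *		HashUpdate(&ctx, (uint8_t *)p, s);
 *		HashFinal(digest, &ctx);
 *	}
 */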
/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;			/* Number of bytes in the cache */

static kmutex_t rndpool_lock;		/* protects r/w accesses to the cache, */
					/* and the global variables */
static kcondvar_t rndpool_read_cv;	/* serializes poll/read syscalls */
static int num_waiters;			/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines();

void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable times: the
	 * high-resolution time since boot, and the current time of day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}

/* Boot-time tunable for experimentation. */
int kcf_limit_hwrng = 1;
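/*
 * For example, an administrator could relax the hardware-RNG limit at
 * boot via /etc/system (assuming this file is built into the kcf module):
 *
 *	set kcf:kcf_limit_hwrng = 0
 */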
/*
 * This routine is called for blocking reads.
 *
 * The argument from_user_api indicates whether the caller is
 * from userland coming via the /dev/random driver.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t from_user_api,
    boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt++;
		/*
		 * Typically a hardware RNG is a multi-purpose
		 * crypto card and hence we do not want to overload the card
		 * just for random numbers. The following check is to prevent
		 * a user process from hogging the hardware RNG. Note that we
		 * still use the hardware RNG from the periodically run
		 * taskq thread.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && from_user_api &&
		    kcf_limit_hwrng == 1) {
			ASSERT(is_taskq_thr == B_FALSE);
			goto try_next;
		}

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}
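/*
 * Buffer ownership note (added for clarity): the MINEXTRACTBYTES buffer
 * passed to notify_done() is allocated in rngprov_getbytes_nblk() below.
 * When a hardware request completes synchronously (rv != CRYPTO_QUEUED),
 * the submitter zeroes and frees it; when the request is queued, this
 * callback is the single point that frees it.
 */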
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len, boolean_t from_user_api)
{
	int rv, blen, total_bytes;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/* See comments in rngprov_getbytes() */
			if (from_user_api && kcf_limit_hwrng == 1)
				goto try_next;

			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the book keeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_FALSE, B_TRUE);
}
/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how,
    boolean_t from_user_api)
{
	int bytes;
	size_t got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, from_user_api,
		    B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len,
		    from_user_api)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we can not block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock,
    boolean_t from_user_api)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how, from_user_api)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}
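/*
 * For illustration, a non-blocking consumer (such as the read path of the
 * /dev/random driver) might call the routine above like this; the names
 * here are hypothetical:
 *
 *	int err;
 *
 *	err = kcf_rnd_get_bytes(buf, nbytes, B_TRUE, B_TRUE);
 *	if (err == EAGAIN) {
 *		// not enough entropy yet; poll(2) or retry later
 *	}
 */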
/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	(RND_CPU_CACHE_SIZE * 6)
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	sizeof (rndmag_t))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[SHA1WORDS];	/* FIPS XKEY */
	uint32_t	rm_seed[SHA1WORDS];	/* seed for rekey */
	uint32_t	rm_previous[SHA1WORDS];	/* previous random bytes */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_pad_t;
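/*
 * The padding only works if rndmag_t actually fits in RND_CPU_PAD_SIZE
 * bytes. A compile-time guard along these lines (CTASSERT is the usual
 * Solaris idiom) could make that assumption explicit; it is shown here
 * as a suggestion, not as part of the original code:
 *
 *	CTASSERT(sizeof (rndmag_t) <= RND_CPU_PAD_SIZE);
 */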
/*
 * Generate random bytes for /dev/urandom by applying the
 * FIPS 186-2 algorithm with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len;
	int nblock, size;
	uint32_t oblocks;
	uint32_t tempout[SHA1WORDS];
	uint32_t seed[SHA1WORDS];
	int i;
	hrtime_t timestamp;
	uint8_t *src, *dst;

	ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));

	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_mag.rm_oblocks += nblock;
	oblocks = rmp->rm_mag.rm_oblocks;

	do {
		if (oblocks >= rmp->rm_mag.rm_olimit) {

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_mag.rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_mag.rm_olimit +=
					    rmp->rm_mag.rm_ofuzz;
					rmp->rm_mag.rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
			    HMAC_KEYSIZE, ALWAYS_EXTRACT, B_FALSE);

			rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS/4;
			oblocks = 0;
			rmp->rm_mag.rm_oblocks = nblock;
		}
punt:
		timestamp = gethrtime();

		src = (uint8_t *)&timestamp;
		dst = (uint8_t *)rmp->rm_mag.rm_seed;

		for (i = 0; i < HASHSIZE; i++) {
			dst[i] ^= src[i % sizeof (timestamp)];
		}

		bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);

		fips_random_inner(rmp->rm_mag.rm_key, tempout,
		    seed);

		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
		} else {
			size = min(bytes, HASHSIZE);
		}

		/*
		 * FIPS 140-2: Continuous RNG test - each generation
		 * of an n-bit block shall be compared with the previously
		 * generated block. Test shall fail if any two compared
		 * n-bit blocks are equal.
		 */
		for (i = 0; i < size/BYTES_IN_WORD; i++) {
			if (tempout[i] != rmp->rm_mag.rm_previous[i])
				break;
		}
		if (i == size/BYTES_IN_WORD)
			cmn_err(CE_WARN, "kcf_random: The 160-bit block of "
			    "random bytes is the same as the previous "
			    "one.\n");

		bcopy(tempout, rmp->rm_mag.rm_previous,
		    HASHSIZE);

		bcopy(tempout, ptr, size);
		ptr += size;
		bytes -= size;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}

/*
 * Per-CPU Random magazines.
 */
static rndmag_pad_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t	rndmag_threshold	= 2560;
size_t	rndbuf_len		= 5120;
size_t	rndmag_size		= 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
	}
}
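/*
 * With the default tunables above, a request of up to 2560 bytes
 * (rndmag_threshold) is served from the per-CPU 5120-byte buffer
 * (rndbuf_len), and anything larger bypasses the buffer entirely.
 * For example, a hypothetical caller asking for 4096 bytes would
 * tail-call rnd_generate_pseudo_bytes() directly.
 */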
/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in rnd_get_pseudo_bytes above on the first call for each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_pad_t *rmp;
	int i;
	uint8_t discard_buf[HASHSIZE];

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus,
	    KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_mag.rm_buffer = buf;
		rmp->rm_mag.rm_eptr = buf + rndbuf_len;
		rmp->rm_mag.rm_rptr = buf + rndbuf_len;
		rmp->rm_mag.rm_oblocks = 1;

		mutex_enter(&rndpool_lock);
		/*
		 * FIPS 140-2: the first n-bit (n > 15) block generated
		 * after power-up, initialization, or reset shall not
		 * be used, but shall be saved for comparison.
		 */
		(void) rnd_get_bytes(discard_buf,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT, B_FALSE);
		bcopy(discard_buf, rmp->rm_mag.rm_previous,
		    HMAC_KEYSIZE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT, B_FALSE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT, B_FALSE);
	}
}

void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id)
		rngmech_type = crypto_mech2id(SUN_RANDOM);

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading the byte at the front of the buffer here,
	 * not consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}
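/*
 * Worked out: with TIMEOUT_INTERVAL = 5 and the multiplier ranging from
 * 0.5 sec to ~1.544 sec, the handler above reschedules itself every
 * 2.5 to roughly 7.7 seconds.
 */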
/*
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(int anyyet, short *reventsp, struct pollhead **phpp)
{
	/*
	 * Sampling of rnbyte_cnt is an atomic
	 * operation. Hence we do not need any locking.
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES) {
		*reventsp |= (POLLIN | POLLRDNORM);
	} else {
		*reventsp = 0;
		if (!anyyet)
			*phpp = &rnd_pollhead;
	}
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	if (len > 0) {
		(void) taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP);
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. It is acceptable
	 * since the blocking will eventually end, after the timeout
	 * has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately.
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}
/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE, B_FALSE));
}
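/*
 * Usage sketch for the exported entry points (illustrative only; the
 * consumer, buffer size, and fallback policy below are assumptions):
 *
 *	uint8_t key[32];
 *
 *	if (random_get_bytes(key, sizeof (key)) == EAGAIN) {
 *		// Not enough entropy yet; fall back to the
 *		// always-successful /dev/urandom generator.
 *		(void) random_get_pseudo_bytes(key, sizeof (key));
 *	}
 */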