/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_add_pseudo_entropy(), random_get_pseudo_bytes()
 * and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy requests for high-quality random numbers (/dev/random).
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_mag.rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_mag.rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#include <rng/fips_random.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per prng key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;
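
/*
 * A note on the extraction modes (see rnd_get_bytes() below):
 * NONBLOCK_EXTRACT fails with EAGAIN when not enough random bytes are
 * available, BLOCKING_EXTRACT sleeps until the pool is replenished, and
 * ALWAYS_EXTRACT never blocks or fails, stretching whatever bits are in
 * the pool if necessary. The last mode is used only to seed and rekey
 * the /dev/urandom generators, which run their own mixing algorithm.
 */
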
/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error code in the /dev/urandom case and we can get an error using k-API
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20

/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;			/* Number of bytes in the cache */

static kmutex_t rndpool_lock;		/* protects r/w accesses to the cache, */
					/* and the global variables */
static kcondvar_t rndpool_read_cv;	/* serializes poll/read syscalls */
static int num_waiters;		/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines();

void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable times: the
	 * high-resolution time since boot, and the current time of day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));
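
	/*
	 * The timestamp bytes mixed in above stay in the pool, but the
	 * byte count is (re)set to zero below, so they are deliberately
	 * not credited as extractable entropy.
	 */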
	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}

/*
 * This routine is called for blocking reads.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt++;

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
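
/*
 * Callback for asynchronous requests submitted to hardware providers in
 * rngprov_getbytes_nblk() below. By the time it runs, the original caller
 * may be gone, so the generated bytes are banked in the pool instead.
 */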
static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}

/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, blen, total_bytes;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we cannot
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
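
/*
 * Pool-replenishing task, dispatched from rnd_handler(). Passing
 * is_taskq_thr == B_TRUE makes rngprov_getbytes() cycle through all
 * providers and bank everything they return into the pool.
 */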
static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_TRUE);
}

/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how)
{
	int bytes;
	size_t got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}
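
	/*
	 * Whatever is still owed has to come out of the pool itself.
	 * For BLOCKING_EXTRACT we sleep below until the timeout handler
	 * replenishes the pool; ALWAYS_EXTRACT never blocks or fails,
	 * recycling pool bytes instead when it runs short.
	 */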
	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we cannot block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	(RND_CPU_CACHE_SIZE * 6)
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	sizeof (rndmag_t))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[HASHSIZE/BYTES_IN_WORD];		/* FIPS XKEY */
	uint32_t	rm_seed[HASHSIZE/BYTES_IN_WORD];	/* seed for rekey */
	uint32_t	rm_previous[HASHSIZE/BYTES_IN_WORD];	/* prev random */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_pad_t;

/*
 * Generate random bytes for /dev/urandom by applying the
 * FIPS 186-2 algorithm with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len;
	int nblock, size;
	uint32_t oblocks;
	uint32_t tempout[HASHSIZE/BYTES_IN_WORD];
	uint32_t seed[HASHSIZE/BYTES_IN_WORD];
	int i;
	hrtime_t timestamp;
	uint8_t *src, *dst;

	ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));

	/* Nothing was asked for. */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_mag.rm_oblocks += nblock;
	oblocks = rmp->rm_mag.rm_oblocks;

	do {
		if (oblocks >= rmp->rm_mag.rm_olimit) {

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_mag.rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_mag.rm_olimit +=
					    rmp->rm_mag.rm_ofuzz;
					rmp->rm_mag.rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
			    HMAC_KEYSIZE, ALWAYS_EXTRACT);

			rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS/4;
			oblocks = 0;
			rmp->rm_mag.rm_oblocks = nblock;
		}
	punt:
		timestamp = gethrtime();

		src = (uint8_t *)&timestamp;
		dst = (uint8_t *)rmp->rm_mag.rm_seed;

		for (i = 0; i < HASHSIZE; i++) {
			dst[i] ^= src[i % sizeof (timestamp)];
		}

		bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);
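
		/*
		 * One step of the FIPS 186-2 generator: derive HASHSIZE
		 * bytes of output from rm_key (XKEY) and the
		 * timestamp-perturbed seed, updating rm_key in place for
		 * the next block.
		 */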
		fips_random_inner(rmp->rm_mag.rm_key, tempout,
		    seed);

		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
		} else {
			size = min(bytes, HASHSIZE);
		}

		/*
		 * FIPS 140-2: Continuous RNG test - each generation
		 * of an n-bit block shall be compared with the previously
		 * generated block. Test shall fail if any two compared
		 * n-bit blocks are equal.
		 */
		for (i = 0; i < HASHSIZE/BYTES_IN_WORD; i++) {
			if (tempout[i] != rmp->rm_mag.rm_previous[i])
				break;
		}
		if (i == HASHSIZE/BYTES_IN_WORD) {
			cmn_err(CE_WARN, "kcf_random: 160-bit random output "
			    "block is identical to the previous one.\n");
			/*
			 * Discard the random bytes and return an error.
			 * Zero the intermediate values and drop rm_lock,
			 * per this routine's contract.
			 */
			bzero(seed, HASHSIZE);
			bzero(tempout, HASHSIZE);
			mutex_exit(&rmp->rm_mag.rm_lock);
			return (EIO);
		}

		bcopy(tempout, rmp->rm_mag.rm_previous,
		    HASHSIZE);

		bcopy(tempout, ptr, size);
		ptr += size;
		bytes -= size;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}

/*
 * Per-CPU Random magazines.
 */
static rndmag_pad_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t rndmag_threshold = 2560;
size_t rndbuf_len = 5120;
size_t rndmag_size = 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass the buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
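
		/*
		 * Loop back to the fast path. Since the refill dropped
		 * rm_lock, we may have been preempted and migrated; the
		 * retry re-reads CPU->cpu_seqid and picks up the new
		 * CPU's magazine.
		 */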
	}
}

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes above on the first call for
 * each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_pad_t *rmp;
	int i;
	uint8_t discard_buf[HASHSIZE];

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus,
	    KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_mag.rm_buffer = buf;
		rmp->rm_mag.rm_eptr = buf + rndbuf_len;
		rmp->rm_mag.rm_rptr = buf + rndbuf_len;
		rmp->rm_mag.rm_oblocks = 1;

		mutex_enter(&rndpool_lock);
		/*
		 * FIPS 140-2: the first n-bit (n > 15) block generated
		 * after power-up, initialization, or reset shall not
		 * be used, but shall be saved for comparison.
		 */
		(void) rnd_get_bytes(discard_buf,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		bcopy(discard_buf, rmp->rm_mag.rm_previous,
		    HMAC_KEYSIZE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
	}
}

void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id)
		rngmech_type = crypto_mech2id(SUN_RANDOM);

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading the byte at the front of the buffer here,
	 * not consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}

/*
 * Called from the driver for a poll on /dev/random
 * . POLLOUT always succeeds.
 * . POLLIN and POLLRDNORM will block until a
 *   minimum amount of entropy is available.
 *
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	*reventsp = events & POLLOUT;

	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * Sampling of rnbyte_cnt is an atomic
		 * operation. Hence we do not need any locking.
		 */
		if (rnbyte_cnt >= MINEXTRACTBYTES)
			*reventsp |= (events & (POLLIN | POLLRDNORM));
	}

	if (*reventsp == 0 && !anyyet)
		*phpp = &rnd_pollhead;
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}
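
	/*
	 * Replenishment policy: if readers are blocked on /dev/random,
	 * ask the providers for a full MAXEXTRACTBYTES; otherwise just
	 * top off the pool with MINEXTRACTBYTES whenever it is not full.
	 */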
	if (num_waiters > 0)
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	if (len > 0) {
		(void) taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP);
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. That is
	 * acceptable, since the blocking will eventually end after the
	 * timeout has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/*
	 * Handle the buffer-full case: keep mixing the new bytes into
	 * the pool, advancing findex along with rindex so the byte
	 * count stays at RNDPOOLSIZE.
	 */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}
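
/*
 * Usage sketch for the exported API below (illustrative only; 'iv' is a
 * hypothetical consumer buffer, not from this file): prefer high-quality
 * bytes, falling back to the pseudo-random generator when entropy is
 * short.
 *
 *	uint8_t iv[16];
 *
 *	if (random_get_bytes(iv, sizeof (iv)) != 0)
 *		(void) random_get_pseudo_bytes(iv, sizeof (iv));
 */
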
/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately.
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
}