/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright 2010 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_add_pseudo_entropy(), random_get_pseudo_bytes()
 * and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache, which
 * is used to satisfy high-quality randomness (/dev/random) requests.
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_mag.rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_mag.rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/ioctladmin.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#include <rng/fips_random.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per PRNG key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use the SHA1
 * routines directly instead of the k-API because we cannot return an error
 * code in the /dev/urandom case, and the k-API can return an error if a
 * mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20
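
/*
 * Illustration only (not part of the original source): the macros above are
 * the only algorithm-specific pieces, so producing one HASHSIZE-byte output
 * block from a seed is a plain init/update/final sequence. The helper name
 * below is hypothetical and the block is compiled out.
 */
#if 0
static void
hash_one_block(uint8_t *seed, size_t seedlen, uint8_t *out)
{
	HASH_CTX ctx;

	HashInit(&ctx);
	HashUpdate(&ctx, seed, seedlen);
	HashFinal(out, &ctx);	/* writes HASHSIZE (20) bytes to 'out' */
}
#endif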

/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;		/* Number of bytes in the cache */

static kmutex_t rndpool_lock;	/* protects r/w accesses to the cache, */
				/* and the global variables */
static kcondvar_t rndpool_read_cv; /* serializes poll/read syscalls */
static int num_waiters;		/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
/* LINTED E_STATIC_UNUSED */
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;
static boolean_t rngprov_task_idle = B_TRUE;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines();

void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable times: the
	 * high-resolution time since boot and the current time of day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}
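
/*
 * Illustration only (not in the original source): a driver could feed
 * timing jitter into the kCF RNG through random_add_entropy(), defined at
 * the bottom of this file. The function name below is hypothetical and the
 * block is compiled out.
 */
#if 0
static void
mydrv_intr_entropy(void)
{
	hrtime_t ts = gethrtime();

	/* Claim an entropy estimate of 0; the timing is only mixed in. */
	(void) random_add_entropy((uint8_t *)&ts, sizeof (ts), 0);
}
#endif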

/*
 * This routine is called for blocking reads.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}

/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, total_bytes;
	size_t blen;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we cannot
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_TRUE);
	rngprov_task_idle = B_TRUE;
}

/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how)
{
	size_t bytes;
	int got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we cannot block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	RND_CPU_CACHE_SIZE*6
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	sizeof (rndmag_t))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[HASHSIZE/BYTES_IN_WORD];	/* FIPS XKEY */
	uint32_t	rm_seed[HASHSIZE/BYTES_IN_WORD]; /* seed for rekey */
	uint32_t	rm_previous[HASHSIZE/BYTES_IN_WORD]; /* prev random */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_pad_t;
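
/*
 * Illustration only (not in the original source): the padding above is
 * meant to give each per-CPU magazine its own set of cache lines. Assuming
 * CTASSERT() from <sys/debug.h> is available, a compile-time check along
 * these lines would catch rndmag_t outgrowing RND_CPU_PAD_SIZE:
 */
#if 0
CTASSERT(sizeof (rndmag_pad_t) == RND_CPU_PAD_SIZE);
#endif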

/*
 * Generate random bytes for /dev/urandom by applying the
 * FIPS 186-2 algorithm with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len, size;
	int nblock;
	uint32_t oblocks;
	uint32_t tempout[HASHSIZE/BYTES_IN_WORD];
	uint32_t seed[HASHSIZE/BYTES_IN_WORD];
	int i;
	hrtime_t timestamp;
	uint8_t *src, *dst;

	ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));

	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_mag.rm_oblocks += nblock;
	oblocks = rmp->rm_mag.rm_oblocks;

	do {
		if (oblocks >= rmp->rm_mag.rm_olimit) {

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_mag.rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_mag.rm_olimit +=
					    rmp->rm_mag.rm_ofuzz;
					rmp->rm_mag.rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
			    HMAC_KEYSIZE, ALWAYS_EXTRACT);

			rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS/4;
			oblocks = 0;
			rmp->rm_mag.rm_oblocks = nblock;
		}
punt:
		timestamp = gethrtime();

		src = (uint8_t *)&timestamp;
		dst = (uint8_t *)rmp->rm_mag.rm_seed;

		for (i = 0; i < HASHSIZE; i++) {
			dst[i] ^= src[i % sizeof (timestamp)];
		}

		bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);

		fips_random_inner(rmp->rm_mag.rm_key, tempout,
		    seed);

		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
		} else {
			size = min(bytes, HASHSIZE);
		}

		/*
		 * FIPS 140-2: Continuous RNG test - each generation
		 * of an n-bit block shall be compared with the previously
		 * generated block. Test shall fail if any two compared
		 * n-bit blocks are equal.
		 */
		for (i = 0; i < HASHSIZE/BYTES_IN_WORD; i++) {
			if (tempout[i] != rmp->rm_mag.rm_previous[i])
				break;
		}
		if (i == HASHSIZE/BYTES_IN_WORD) {
			cmn_err(CE_WARN, "kcf_random: The 160-bit block of "
			    "random bytes is the same as the previous "
			    "one.\n");
			/* discard random bytes and return error */
			mutex_exit(&rmp->rm_mag.rm_lock);
			return (EIO);
		}

		bcopy(tempout, rmp->rm_mag.rm_previous,
		    HASHSIZE);

		bcopy(tempout, ptr, size);
		ptr += size;
		bytes -= size;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}

/*
 * Per-CPU Random magazines.
 */
static rndmag_pad_t *rndmag;
static uint8_t	*rndbuf;
static size_t	rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t	rndmag_threshold = 2560;
size_t	rndbuf_len = 5120;
size_t	rndmag_size = 1280;

int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
	}
}
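
/*
 * Illustration only (not in the original source): in-kernel consumers reach
 * the code above through random_get_pseudo_bytes(), defined at the end of
 * this file, and typically treat it as infallible. A hypothetical caller:
 */
#if 0
static void
mydrv_gen_nonce(void)
{
	uint8_t nonce[16];

	(void) random_get_pseudo_bytes(nonce, sizeof (nonce));
}
#endif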

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in rnd_get_pseudo_bytes above on the first call for each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_pad_t *rmp;
	int i;
	uint8_t discard_buf[HASHSIZE];

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus,
	    KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_mag.rm_buffer = buf;
		rmp->rm_mag.rm_eptr = buf + rndbuf_len;
		rmp->rm_mag.rm_rptr = buf + rndbuf_len;
		rmp->rm_mag.rm_oblocks = 1;

		mutex_enter(&rndpool_lock);
		/*
		 * FIPS 140-2: the first n-bit (n > 15) block generated
		 * after power-up, initialization, or reset shall not
		 * be used, but shall be saved for comparison.
		 */
		(void) rnd_get_bytes(discard_buf,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		bcopy(discard_buf, rmp->rm_mag.rm_previous,
		    HMAC_KEYSIZE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
	}
}

static void
rnd_mechid(void *notused)
{
	_NOTE(ARGUNUSED(notused));
	rngmech_type = crypto_mech2id(SUN_RANDOM);
}

void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id) {
		/* This should never fail due to TQ_SLEEP. */
		(void) taskq_dispatch(system_taskq, rnd_mechid, NULL,
		    TQ_SLEEP);
	}

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading the first 32 bits from the buffer here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}

/*
 * Called from the driver for a poll on /dev/random
 * . POLLOUT always succeeds.
 * . POLLIN and POLLRDNORM will block until a
 *   minimum amount of entropy is available.
 *
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	*reventsp = events & POLLOUT;

	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * Sampling of rnbyte_cnt is an atomic
		 * operation. Hence we do not need any locking.
		 */
		if (rnbyte_cnt >= MINEXTRACTBYTES)
			*reventsp |= (events & (POLLIN | POLLRDNORM));
	}

	if (*reventsp == 0 && !anyyet)
		*phpp = &rnd_pollhead;
}
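
/*
 * Illustration only, seen from user space (not in the original source): the
 * chpoll semantics above mean a poll(2) for POLLIN on /dev/random does not
 * report readability until at least MINEXTRACTBYTES of entropy are cached.
 * A minimal sketch of a blocking reader:
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int
read_random_blocking(void *buf, size_t len)
{
	struct pollfd pfd;

	if ((pfd.fd = open("/dev/random", O_RDONLY)) < 0)
		return (-1);
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) < 0 || !(pfd.revents & POLLIN)) {
		(void) close(pfd.fd);
		return (-1);
	}
	/* A short read is possible; a robust caller would loop. */
	if (read(pfd.fd, buf, len) < 0) {
		(void) close(pfd.fd);
		return (-1);
	}
	return (close(pfd.fd));
}
#endif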

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		/*
		 * Note: len has no relationship with how many bytes
		 * a poll thread needs.
		 */
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	/*
	 * Only one thread gets to set rngprov_task_idle at a given point
	 * of time and the order of the writes is defined. Also, it is OK
	 * if we read an older value of it and skip the dispatch once
	 * since we will get the correct value during the next time here.
	 * So, no locking is needed here.
	 */
	if (len > 0 && rngprov_task_idle) {
		rngprov_task_idle = B_FALSE;

		/*
		 * It is OK if taskq_dispatch fails here. We will retry
		 * the next time around. Meanwhile, a thread doing a
		 * read() will go to the provider directly, if the
		 * cache becomes empty.
		 */
		if (taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP | TQ_NOQUEUE) == 0) {
			rngprov_task_idle = B_TRUE;
		}
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. It is acceptable
	 * since the blocking will eventually end, after the timeout
	 * has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}
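
/*
 * Illustration only (not in the original source): the index arithmetic
 * above and in rndc_getbytes() masks with (RNDPOOLSIZE - 1), which is only
 * correct because RNDPOOLSIZE is a power of two. Assuming CTASSERT() and
 * ISP2() are available, that invariant could be checked at compile time:
 */
#if 0
CTASSERT(ISP2(RNDPOOLSIZE));
#endif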

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately.
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
}
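
/*
 * Illustration only (not in the original source): a hypothetical caller
 * that prefers high-quality bytes but cannot block could combine the two
 * exported interfaces, falling back on EAGAIN (the documented
 * insufficient-entropy return of random_get_bytes()):
 */
#if 0
static int
mydrv_gen_key(uint8_t *key, size_t keylen)
{
	if (random_get_bytes(key, keylen) == EAGAIN)
		return (random_get_pseudo_bytes(key, keylen));
	return (0);
}
#endif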