/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_add_pseudo_entropy(), random_get_pseudo_bytes()
 * and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy high-quality randomness (/dev/random) requests. If the
 * cache does not have enough bytes to satisfy a request, we pick a
 * provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_mag.rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_mag.rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/ioctladmin.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#include <rng/fips_random.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per prng key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use the SHA1
 * routines directly instead of going through the k-API because we can not
 * return an error code in the /dev/urandom case, and the k-API can return
 * an error if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20
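/*
 * For reference, the macros above simply wrap the raw SHA-1 routines. A
 * digest computation through them looks like the following sketch ('data'
 * and 'len' are hypothetical placeholders; illustrative only, not code
 * compiled as part of this file):
 *
 *	HASH_CTX ctx;
 *	uint8_t digest[HASHSIZE];
 *
 *	HashInit(&ctx);
 *	HashUpdate(&ctx, data, len);
 *	HashFinal(digest, &ctx);
 */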
/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;			/* Number of bytes in the cache */

static kmutex_t rndpool_lock;		/* protects r/w accesses to the cache, */
					/* and the global variables */
static kcondvar_t rndpool_read_cv;	/* serializes poll/read syscalls */
static int num_waiters;			/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
/* LINTED E_STATIC_UNUSED */
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;
static boolean_t rngprov_task_idle = B_TRUE;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines(void);

void
kcf_rnd_init(void)
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable times: the
	 * high-resolution time since boot, and the current time of day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}
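/*
 * A note on the index arithmetic used with the rndpool cache above: the
 * updates in rndc_addbytes()/rndc_getbytes() below have the form
 *
 *	index = (index + 1) & (RNDPOOLSIZE - 1);
 *
 * which relies on RNDPOOLSIZE being a power of two; the mask is then
 * equivalent to (and cheaper than) "(index + 1) % RNDPOOLSIZE". If
 * RNDPOOLSIZE is ever changed, it must remain a power of two.
 */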
/*
 * This routine is called for blocking reads.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that were written to 'ptr'. Returns -1
 * if no provider is found, in which case the contents of 'ptr' are
 * unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt++;

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}
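/*
 * notify_done() above is the completion callback for the asynchronous
 * requests that rngprov_getbytes_nblk() below submits to hardware
 * providers: on success the freshly generated bytes are mixed into the
 * cache, and the scratch buffer is always zeroed and freed regardless
 * of the outcome.
 */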
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that were written to 'ptr'. Returns -1
 * if no provider is found, in which case the contents of 'ptr' are
 * unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, total_bytes;
	size_t blen;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the book keeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_TRUE);
	rngprov_task_idle = B_TRUE;
}
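/*
 * For reference, the three extract_type_t modes are handled by
 * rnd_get_bytes() below as follows:
 *
 *	BLOCKING_EXTRACT	Wait on rndpool_read_cv until at least
 *				MINEXTRACTBYTES are available; can return
 *				EINTR if interrupted by a signal.
 *	NONBLOCK_EXTRACT	Return EAGAIN instead of blocking when
 *				neither the providers nor the cache can
 *				cover the request.
 *	ALWAYS_EXTRACT		Never fail for lack of entropy; fall back
 *				to reusing pool bytes (the /dev/urandom
 *				seeding path).
 */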
/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing the caller to do optimistic
 * locking); releases the lock before returning.
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how)
{
	size_t bytes;
	size_t got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we cannot block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	(RND_CPU_CACHE_SIZE * 6)
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
				sizeof (rndmag_t))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
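/*
 * The structures below are laid out so that each per-CPU instance is
 * padded out to RND_CPU_PAD_SIZE bytes, i.e. six RND_CPU_CACHE_SIZE-byte
 * cache lines. Consecutive elements of the rndmag array therefore never
 * share a cache line, which is what keeps one CPU's generator state from
 * bouncing another CPU's cache.
 */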
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[HASHSIZE/BYTES_IN_WORD];	/* FIPS XKEY */
	uint32_t	rm_seed[HASHSIZE/BYTES_IN_WORD];	/* seed for rekey */
	uint32_t	rm_previous[HASHSIZE/BYTES_IN_WORD];	/* prev random */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_pad_t;

/*
 * Generate random bytes for /dev/urandom by applying the
 * FIPS 186-2 algorithm with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases the lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len, size;
	int nblock;
	uint32_t oblocks;
	uint32_t tempout[HASHSIZE/BYTES_IN_WORD];
	uint32_t seed[HASHSIZE/BYTES_IN_WORD];
	int i;
	hrtime_t timestamp;
	uint8_t *src, *dst;

	ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));

	/* Nothing was asked for. */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_mag.rm_oblocks += nblock;
	oblocks = rmp->rm_mag.rm_oblocks;

	do {
		if (oblocks >= rmp->rm_mag.rm_olimit) {

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_mag.rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_mag.rm_olimit +=
					    rmp->rm_mag.rm_ofuzz;
					rmp->rm_mag.rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
			    HMAC_KEYSIZE, ALWAYS_EXTRACT);

			rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS/4;
			oblocks = 0;
			rmp->rm_mag.rm_oblocks = nblock;
		}
punt:
		timestamp = gethrtime();

		src = (uint8_t *)&timestamp;
		dst = (uint8_t *)rmp->rm_mag.rm_seed;

		for (i = 0; i < HASHSIZE; i++) {
			dst[i] ^= src[i % sizeof (timestamp)];
		}

		bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);

		fips_random_inner(rmp->rm_mag.rm_key, tempout, seed);

		size = min(bytes, HASHSIZE);

		/*
		 * FIPS 140-2: Continuous RNG test - each generation
		 * of an n-bit block shall be compared with the previously
		 * generated block. The test shall fail if any two compared
		 * n-bit blocks are equal.
		 */
		for (i = 0; i < HASHSIZE/BYTES_IN_WORD; i++) {
			if (tempout[i] != rmp->rm_mag.rm_previous[i])
				break;
		}
		if (i == HASHSIZE/BYTES_IN_WORD) {
			cmn_err(CE_WARN, "kcf_random: 160-bit block of "
			    "random bytes is identical to the previous "
			    "one.\n");
			/* Discard the random bytes and return an error. */
			bzero(seed, HASHSIZE);
			bzero(tempout, HASHSIZE);
			mutex_exit(&rmp->rm_mag.rm_lock);
			return (EIO);
		}

		bcopy(tempout, rmp->rm_mag.rm_previous, HASHSIZE);

		bcopy(tempout, ptr, size);
		ptr += size;
		bytes -= size;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}
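/*
 * To recap the loop above, each HASHSIZE-byte output block is produced
 * roughly as follows (a sketch of the data flow, not additional code):
 *
 *	rm_seed ^= bytes of gethrtime();	(stir in a timestamp)
 *	tempout = fips_random_inner(rm_key, rm_seed);
 *	reject with EIO if tempout == rm_previous;  (continuous RNG test)
 *	rm_previous = tempout; copy up to HASHSIZE bytes to the caller.
 *
 * Rekeying rm_key from the pool happens after at most PRNG_MAXOBLOCKS
 * output blocks, and sooner when the pool lock can be taken cheaply.
 */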
/*
 * Per-CPU Random magazines.
 */
static rndmag_pad_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t rndmag_threshold = 2560;
size_t rndbuf_len = 5120;
size_t rndmag_size = 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass the buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present).
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
	}
}

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes() above on the first call for
 * each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
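/*
 * "Empty" concretely means the state set up below:
 *
 *	rm_rptr == rm_eptr == rm_buffer + rndbuf_len
 *
 * so the first kcf_rnd_get_pseudo_bytes() call on each CPU fails the
 * fast-path bounds check (eptr <= rm_eptr) for any nonzero length and
 * falls through to the refill of the magazine buffer.
 */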
780 */ 781 static void 782 rnd_alloc_magazines() 783 { 784 rndmag_pad_t *rmp; 785 int i; 786 uint8_t discard_buf[HASHSIZE]; 787 788 rndbuf_len = roundup(rndbuf_len, HASHSIZE); 789 if (rndmag_size < rndbuf_len) 790 rndmag_size = rndbuf_len; 791 rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE); 792 793 random_max_ncpus = max_ncpus; 794 rndmag_total = rndmag_size * random_max_ncpus; 795 796 rndbuf = kmem_alloc(rndmag_total, KM_SLEEP); 797 rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus, 798 KM_SLEEP); 799 800 for (i = 0; i < random_max_ncpus; i++) { 801 uint8_t *buf; 802 803 rmp = &rndmag[i]; 804 mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL); 805 806 buf = rndbuf + i * rndmag_size; 807 808 rmp->rm_mag.rm_buffer = buf; 809 rmp->rm_mag.rm_eptr = buf + rndbuf_len; 810 rmp->rm_mag.rm_rptr = buf + rndbuf_len; 811 rmp->rm_mag.rm_oblocks = 1; 812 813 mutex_enter(&rndpool_lock); 814 /* 815 * FIPS 140-2: the first n-bit (n > 15) block generated 816 * after power-up, initialization, or reset shall not 817 * be used, but shall be saved for comparison. 818 */ 819 (void) rnd_get_bytes(discard_buf, 820 HMAC_KEYSIZE, ALWAYS_EXTRACT); 821 bcopy(discard_buf, rmp->rm_mag.rm_previous, 822 HMAC_KEYSIZE); 823 /* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */ 824 mutex_enter(&rndpool_lock); 825 (void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key, 826 HMAC_KEYSIZE, ALWAYS_EXTRACT); 827 /* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */ 828 mutex_enter(&rndpool_lock); 829 (void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed, 830 HMAC_KEYSIZE, ALWAYS_EXTRACT); 831 } 832 } 833 834 void 835 kcf_rnd_schedule_timeout(boolean_t do_mech2id) 836 { 837 clock_t ut; /* time in microseconds */ 838 839 if (do_mech2id) 840 rngmech_type = crypto_mech2id(SUN_RANDOM); 841 842 /* 843 * The new timeout value is taken from the buffer of random bytes. 844 * We're merely reading the first 32 bits from the buffer here, not 845 * consuming any random bytes. 846 * The timeout multiplier value is a random value between 0.5 sec and 847 * 1.544480 sec (0.5 sec + 0xFF000 microseconds). 848 * The new timeout is TIMEOUT_INTERVAL times that multiplier. 849 */ 850 ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000); 851 kcf_rndtimeout_id = timeout(rnd_handler, NULL, 852 TIMEOUT_INTERVAL * drv_usectohz(ut)); 853 } 854 855 /* 856 * Called from the driver for a poll on /dev/random 857 * . POLLOUT always succeeds. 858 * . POLLIN and POLLRDNORM will block until a 859 * minimum amount of entropy is available. 860 * 861 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread 862 * will block. When enough random bytes are available, later, the timeout 863 * handler routine will issue the pollwakeup() calls. 864 */ 865 void 866 kcf_rnd_chpoll(short events, int anyyet, short *reventsp, 867 struct pollhead **phpp) 868 { 869 *reventsp = events & POLLOUT; 870 871 if (events & (POLLIN | POLLRDNORM)) { 872 /* 873 * Sampling of rnbyte_cnt is an atomic 874 * operation. Hence we do not need any locking. 875 */ 876 if (rnbyte_cnt >= MINEXTRACTBYTES) 877 *reventsp |= (events & (POLLIN | POLLRDNORM)); 878 } 879 880 if (*reventsp == 0 && !anyyet) 881 *phpp = &rnd_pollhead; 882 } 883 884 /*ARGSUSED*/ 885 static void 886 rnd_handler(void *arg) 887 { 888 int len = 0; 889 890 if (!rng_prov_found && rng_ok_to_log) { 891 cmn_err(CE_WARN, "No randomness provider enabled for " 892 "/dev/random. 
/*
 * Called from the driver for a poll on /dev/random
 * . POLLOUT always succeeds.
 * . POLLIN and POLLRDNORM will block until a
 *   minimum amount of entropy is available.
 *
 * &rnd_pollhead is returned in *phpp in order to indicate that the
 * calling thread will block. When enough random bytes become available
 * later, the timeout handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	*reventsp = events & POLLOUT;

	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * Sampling of rnbyte_cnt is an atomic
		 * operation. Hence we do not need any locking.
		 */
		if (rnbyte_cnt >= MINEXTRACTBYTES)
			*reventsp |= (events & (POLLIN | POLLRDNORM));
	}

	if (*reventsp == 0 && !anyyet)
		*phpp = &rnd_pollhead;
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		/*
		 * Note: len has no relationship with how many bytes
		 * a poll thread needs.
		 */
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	/*
	 * Only one thread gets to set rngprov_task_idle at a given point
	 * of time and the order of the writes is defined. Also, it is OK
	 * if we read an older value of it and skip the dispatch once
	 * since we will get the correct value during the next time here.
	 * So, no locking is needed here.
	 */
	if (len > 0 && rngprov_task_idle) {
		rngprov_task_idle = B_FALSE;

		/*
		 * It is OK if taskq_dispatch fails here. We will retry
		 * the next time around. Meanwhile, a thread doing a
		 * read() will go to the provider directly, if the
		 * cache becomes empty.
		 */
		if (taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP | TQ_NOQUEUE) == 0) {
			rngprov_task_idle = B_TRUE;
		}
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. That is
	 * acceptable, since the blocking will eventually end after the
	 * timeout has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * The caller should check that len <= rnbyte_cnt, under
 * rndpool_lock, before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}
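/*
 * Illustrative caller sketch (hypothetical driver code, not part of this
 * file): a driver harvesting timing jitter might feed it to the framework
 * with
 *
 *	hrtime_t t = gethrtime();
 *	(void) random_add_entropy((uint8_t *)&t, sizeof (t), 0);
 *
 * where the last argument is the caller's estimate of the entropy
 * (conventionally in bits) contained in the buffer. random_add_entropy()
 * below seeds the provider immediately (CRYPTO_SEED_NOW), while
 * random_add_pseudo_entropy() above lets the provider defer the mixing.
 */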
1014 */ 1015 int 1016 random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est) 1017 { 1018 if (len < 1) 1019 return (-1); 1020 1021 rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW); 1022 1023 return (0); 1024 } 1025 1026 /* 1027 * Get bytes from the /dev/urandom generator. This function 1028 * always succeeds. Returns 0. 1029 */ 1030 int 1031 random_get_pseudo_bytes(uint8_t *ptr, size_t len) 1032 { 1033 ASSERT(!mutex_owned(&rndpool_lock)); 1034 1035 if (len < 1) 1036 return (0); 1037 return (kcf_rnd_get_pseudo_bytes(ptr, len)); 1038 } 1039 1040 /* 1041 * Get bytes from the /dev/random generator. Returns 0 1042 * on success. Returns EAGAIN if there is insufficient entropy. 1043 */ 1044 int 1045 random_get_bytes(uint8_t *ptr, size_t len) 1046 { 1047 ASSERT(!mutex_owned(&rndpool_lock)); 1048 1049 if (len < 1) 1050 return (0); 1051 return (kcf_rnd_get_bytes(ptr, len, B_TRUE)); 1052 } 1053 1054 /* 1055 * The two functions below are identical to random_get_pseudo_bytes() and 1056 * random_get_bytes_fips, this function is called for consumers that want 1057 * FIPS 140-2. This function waits until the FIPS boundary can be verified. 1058 */ 1059 1060 /* 1061 * Get bytes from the /dev/urandom generator. This function 1062 * always succeeds. Returns 0. 1063 */ 1064 int 1065 random_get_pseudo_bytes_fips140(uint8_t *ptr, size_t len) 1066 { 1067 ASSERT(!mutex_owned(&rndpool_lock)); 1068 1069 mutex_enter(&fips140_mode_lock); 1070 while (global_fips140_mode < FIPS140_MODE_ENABLED) { 1071 cv_wait(&cv_fips140, &fips140_mode_lock); 1072 } 1073 mutex_exit(&fips140_mode_lock); 1074 1075 if (len < 1) 1076 return (0); 1077 return (kcf_rnd_get_pseudo_bytes(ptr, len)); 1078 } 1079 1080 /* 1081 * Get bytes from the /dev/random generator. Returns 0 1082 * on success. Returns EAGAIN if there is insufficient entropy. 1083 */ 1084 int 1085 random_get_bytes_fips140(uint8_t *ptr, size_t len) 1086 { 1087 ASSERT(!mutex_owned(&rndpool_lock)); 1088 1089 mutex_enter(&fips140_mode_lock); 1090 while (global_fips140_mode < FIPS140_MODE_ENABLED) { 1091 cv_wait(&cv_fips140, &fips140_mode_lock); 1092 } 1093 mutex_exit(&fips140_mode_lock); 1094 1095 if (len < 1) 1096 return (0); 1097 return (kcf_rnd_get_bytes(ptr, len, B_TRUE)); 1098 } 1099