/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_add_pseudo_entropy(), random_get_pseudo_bytes()
 * and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy requests for high-quality random numbers (/dev/random).
 * If the cache does not hold enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_mag.rm_lock protects the per-CPU pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_mag.rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/ioctladmin.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#include <rng/fips_random.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per PRNG key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use the SHA1
 * routines directly instead of the kCF API because we cannot return an
 * error code in the /dev/urandom case, and the kCF API can return an error
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))
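
/*
 * Illustrative sketch (not part of the driver): the Hash* macros above
 * wrap the raw SHA-1 routines from <sys/sha1.h>, so hashing a buffer
 * into a HASHSIZE-byte digest looks like this (buf and buflen are
 * hypothetical):
 *
 *	HASH_CTX ctx;
 *	uint8_t digest[HASHSIZE];
 *
 *	HashInit(&ctx);
 *	HashUpdate(&ctx, buf, buflen);
 *	HashFinal(digest, &ctx);
 *
 * Unlike digest calls routed through the framework, none of these steps
 * can fail, which is why they are safe on the /dev/urandom path.
 */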

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE	20

/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;		/* Number of bytes in the cache */

static kmutex_t rndpool_lock;	/* protects r/w accesses to the cache, */
				/* and the global variables */
static kcondvar_t rndpool_read_cv; /* serializes poll/read syscalls */
static int num_waiters;		/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
/* LINTED E_STATIC_UNUSED */
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;
static boolean_t rngprov_task_idle = B_TRUE;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines(void);
static void rnd_fips_discard_initial(void);
static void rnd_init2(void *);
static void rnd_schedule_timeout(void);

/*
 * Called from kcf:_init()
 */
void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable times: the
	 * high-resolution time since boot, and the current time of day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;

	rnd_alloc_magazines();

	(void) taskq_dispatch(system_taskq, rnd_init2, NULL, TQ_SLEEP);
}

/*
 * This is called via the system taskq, so that we can do further
 * initializations that have to wait until the kcf module itself is
 * done loading. (After kcf:_init returns.)
 */
static void
rnd_init2(void *unused)
{
	_NOTE(ARGUNUSED(unused));

	/*
	 * This will load a randomness provider; typically "swrand",
	 * but could be another provider if so configured.
	 */
	rngmech_type = crypto_mech2id(SUN_RANDOM);

	/* Update rng_prov_found etc. */
	(void) kcf_rngprov_check();

	/* FIPS 140-2 init. */
	rnd_fips_discard_initial();

	/* Start rnd_handler calls. */
	rnd_schedule_timeout();
}
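
/*
 * A minimal sketch of the two-stage initialization pattern used above
 * (illustrative only; "mydrv", mydrv_init2() and the modlinkage are
 * hypothetical). Work that needs the module fully loaded is pushed onto
 * system_taskq from _init(), so it runs only after mod_install() returns:
 *
 *	int
 *	_init(void)
 *	{
 *		int ret = mod_install(&modlinkage);
 *
 *		if (ret == 0)
 *			(void) taskq_dispatch(system_taskq, mydrv_init2,
 *			    NULL, TQ_SLEEP);
 *		return (ret);
 *	}
 *
 * kcf_rnd_init() is invoked from kcf:_init() in just this way, with
 * rnd_init2() playing the role of mydrv_init2().
 */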

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider has become available. So, set the flag
		 * so that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}

/*
 * This routine is called for blocking reads.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers,
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry with another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}
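
/*
 * Illustrative sketch: a hardware driver with its own entropy source can
 * feed bytes into the software provider through the exported
 * random_add_entropy() entry point (defined at the bottom of this file),
 * which lands in rngprov_seed() above with CRYPTO_SEED_NOW. The driver
 * routine and buffer below are hypothetical:
 *
 *	uint8_t hw_bits[32];
 *
 *	mydrv_read_trng(hw_bits, sizeof (hw_bits));
 *	(void) random_add_entropy(hw_bits, sizeof (hw_bits),
 *	    sizeof (hw_bits) * 8);
 *
 * The third argument is the caller's estimate of how much entropy,
 * in bits, the buffer contains.
 */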

/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, total_bytes;
	size_t blen;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_TRUE);
	rngprov_task_idle = B_TRUE;
}
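
/*
 * A note on buffer ownership in the hardware-provider path of
 * rngprov_getbytes_nblk() above, as a sketch (the rules are taken from
 * the code; the shape is illustrative):
 *
 *	rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
 *	req.cr_callback_arg = rndbuf;
 *	rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
 *	if (rv == CRYPTO_QUEUED) {
 *		... notify_done() will bzero() and kmem_free() rndbuf ...
 *	} else {
 *		bzero(rndbuf, MINEXTRACTBYTES);
 *		kmem_free(rndbuf, MINEXTRACTBYTES);
 *	}
 *
 * Exactly one of the two paths frees the buffer, and both zero it first
 * so random bytes never linger in freed memory.
 */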

/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing the caller to do optimistic
 * locking); releases the lock before returning.
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how)
{
	size_t bytes;
	int got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we can not block.
			 * This only happens in the case of /dev/urandom,
			 * which runs an additional generation algorithm.
			 * So, there is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}
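
/*
 * Illustrative sketch of how a read(2) handler for /dev/random might
 * call kcf_rnd_get_bytes() above (the uiomove-based loop is
 * hypothetical; the real consumer lives in the /dev/random driver, not
 * in this file):
 *
 *	uint8_t buf[MAXEXTRACTBYTES];
 *	size_t n = min(uiop->uio_resid, sizeof (buf));
 *	int err;
 *
 *	err = kcf_rnd_get_bytes(buf, n,
 *	    (uiop->uio_fmode & (FNDELAY | FNONBLOCK)) != 0);
 *	if (err == 0)
 *		err = uiomove(buf, n, UIO_READ, uiop);
 *
 * EAGAIN comes back on a non-blocking read when the pool is short;
 * EINTR if a blocking read was interrupted by a signal.
 */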

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	(RND_CPU_CACHE_SIZE * 6)
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	sizeof (rndmag_t))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[HASHSIZE/BYTES_IN_WORD];		/* FIPS XKEY */
	uint32_t	rm_seed[HASHSIZE/BYTES_IN_WORD];	/* seed for rekey */
	uint32_t	rm_previous[HASHSIZE/BYTES_IN_WORD];	/* prev random */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_pad_t;
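
/*
 * The arithmetic above sizes each rndmag_pad_t to exactly
 * RND_CPU_PAD_SIZE bytes (6 cache lines of 64 bytes, i.e. 384 bytes),
 * so per-CPU magazines never share a cache line. A compile-time check
 * along these lines would catch the struct outgrowing its padding
 * (a suggested addition, not present in the original):
 *
 *	CTASSERT(sizeof (rndmag_t) <= RND_CPU_PAD_SIZE);
 *	CTASSERT(sizeof (rndmag_pad_t) == RND_CPU_PAD_SIZE);
 */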

/*
 * Generate random bytes for /dev/urandom by applying the
 * FIPS 186-2 algorithm with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len, size;
	int nblock;
	uint32_t oblocks;
	uint32_t tempout[HASHSIZE/BYTES_IN_WORD];
	uint32_t seed[HASHSIZE/BYTES_IN_WORD];
	int i;
	hrtime_t timestamp;
	uint8_t *src, *dst;

	ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));

	/* Nothing was asked for. */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_mag.rm_oblocks += nblock;
	oblocks = rmp->rm_mag.rm_oblocks;

	do {
		if (oblocks >= rmp->rm_mag.rm_olimit) {
			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_mag.rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_mag.rm_olimit +=
					    rmp->rm_mag.rm_ofuzz;
					rmp->rm_mag.rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
			    HMAC_KEYSIZE, ALWAYS_EXTRACT);

			rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS/4;
			oblocks = 0;
			rmp->rm_mag.rm_oblocks = nblock;
		}
punt:
		timestamp = gethrtime();

		src = (uint8_t *)&timestamp;
		dst = (uint8_t *)rmp->rm_mag.rm_seed;

		for (i = 0; i < HASHSIZE; i++) {
			dst[i] ^= src[i % sizeof (timestamp)];
		}

		bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);

		fips_random_inner(rmp->rm_mag.rm_key, tempout, seed);

		size = min(bytes, HASHSIZE);

		/*
		 * FIPS 140-2: Continuous RNG test - each generation
		 * of an n-bit block shall be compared with the previously
		 * generated block. The test fails if any two compared
		 * n-bit blocks are equal.
		 */
		for (i = 0; i < HASHSIZE/BYTES_IN_WORD; i++) {
			if (tempout[i] != rmp->rm_mag.rm_previous[i])
				break;
		}
		if (i == HASHSIZE/BYTES_IN_WORD) {
			cmn_err(CE_WARN, "kcf_random: 160-bit block of "
			    "random bytes is the same as the previous one.");
			/* discard random bytes and return error */
			mutex_exit(&rmp->rm_mag.rm_lock);
			return (EIO);
		}

		bcopy(tempout, rmp->rm_mag.rm_previous, HASHSIZE);

		bcopy(tempout, ptr, size);
		ptr += size;
		bytes -= size;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}

/*
 * Per-CPU Random magazines.
 */
static rndmag_pad_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t rndmag_threshold = 2560;
size_t rndbuf_len = 5120;
size_t rndmag_size = 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
	}
}
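
/*
 * A worked example of the rekey back-off in rnd_generate_pseudo_bytes()
 * above (values follow directly from the constants in this file): after
 * a rekey, rm_olimit is PRNG_MAXOBLOCKS/2 (655360 blocks) and rm_ofuzz
 * is PRNG_MAXOBLOCKS/4 (327680). Each time the limit is hit while the
 * pool is busy, the limit grows by the current fuzz and the fuzz halves:
 *
 *	olimit: 655360 -> 983040 -> 1146880 -> 1228800 -> ...
 *
 * The series converges to the hard cap of PRNG_MAXOBLOCKS, so the
 * generator can defer rekeying under contention but never avoid it
 * indefinitely.
 */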

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes above on the first call for
 * each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_pad_t *rmp;
	int i;

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus,
	    KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_mag.rm_buffer = buf;
		rmp->rm_mag.rm_eptr = buf + rndbuf_len;
		rmp->rm_mag.rm_rptr = buf + rndbuf_len;
		rmp->rm_mag.rm_oblocks = 1;
	}
}

/*
 * FIPS 140-2: the first n-bit (n > 15) block generated
 * after power-up, initialization, or reset shall not
 * be used, but shall be saved for comparison.
 */
static void
rnd_fips_discard_initial(void)
{
	uint8_t discard_buf[HASHSIZE];
	rndmag_pad_t *rmp;
	int i;

	for (i = 0; i < random_max_ncpus; i++) {
		rmp = &rndmag[i];

		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes(discard_buf,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		bcopy(discard_buf, rmp->rm_mag.rm_previous,
		    HMAC_KEYSIZE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
	}
}

static void
rnd_schedule_timeout(void)
{
	clock_t ut;	/* time in microseconds */

	/*
	 * The new timeout value is derived from a byte in the buffer of
	 * random bytes. We're merely reading that byte here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec
	 * and 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}
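
/*
 * Worked example for the timeout computation above: if the byte at
 * rndpool[findex] is 0x80, then (0x80 << 12) & 0xFF000 = 0x80000
 * (524288 us), so ut = 1024288 us and the timeout fires after
 * TIMEOUT_INTERVAL * 1.024288 sec, i.e. roughly 5.1 seconds. The
 * possible range is 5 * [0.5 sec, 1.544480 sec], or about 2.5 to 7.7
 * seconds between mixes.
 */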

/*
 * Called from the driver for a poll on /dev/random
 * . POLLOUT always succeeds.
 * . POLLIN and POLLRDNORM will block until a
 *   minimum amount of entropy is available.
 *
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	*reventsp = events & POLLOUT;

	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * Sampling of rnbyte_cnt is an atomic
		 * operation. Hence we do not need any locking.
		 */
		if (rnbyte_cnt >= MINEXTRACTBYTES)
			*reventsp |= (events & (POLLIN | POLLRDNORM));
	}

	if (*reventsp == 0 && !anyyet)
		*phpp = &rnd_pollhead;
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		/*
		 * Note: len has no relationship with how many bytes
		 * a poll thread needs.
		 */
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	/*
	 * Only one thread gets to set rngprov_task_idle at a given point
	 * of time and the order of the writes is defined. Also, it is OK
	 * if we read an older value of it and skip the dispatch once,
	 * since we will get the correct value during the next time here.
	 * So, no locking is needed here.
	 */
	if (len > 0 && rngprov_task_idle) {
		rngprov_task_idle = B_FALSE;

		/*
		 * It is OK if taskq_dispatch fails here. We will retry
		 * the next time around. Meanwhile, a thread doing a
		 * read() will go to the provider directly, if the
		 * cache becomes empty.
		 */
		if (taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP | TQ_NOQUEUE) == 0) {
			rngprov_task_idle = B_TRUE;
		}
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. This is
	 * acceptable, since the blocking will eventually end after the
	 * timeout has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	rnd_schedule_timeout();
}

static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}
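
/*
 * The index arithmetic above relies on RNDPOOLSIZE being a power of
 * two, so that "(i + 1) & (RNDPOOLSIZE - 1)" is a cheap equivalent of
 * "(i + 1) % RNDPOOLSIZE". A small worked example with a hypothetical
 * 8-byte pool: the mask is 7 (binary 111), and an index walks
 * 5, 6, 7, 0, 1, ... wrapping with no divide:
 *
 *	i = (i + 1) & 7;
 *
 * Also note that new bytes are XORed into the pool rather than stored,
 * so when the buffer is full, writes mix with, and never simply
 * overwrite, prior entropy.
 */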

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately.
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. Returns 0 on success;
 * the only failure mode is an EIO from the FIPS 140-2 continuous
 * RNG test in rnd_generate_pseudo_bytes().
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
}
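
/*
 * Illustrative consumer sketch (hypothetical kernel code, not part of
 * this file): callers wanting high-quality bytes without blocking
 * commonly try random_get_bytes() and fall back to the pseudo-random
 * generator when the pool is short:
 *
 *	uint8_t key[32];
 *
 *	if (random_get_bytes(key, sizeof (key)) != 0)
 *		(void) random_get_pseudo_bytes(key, sizeof (key));
 *
 * random_get_bytes() returns EAGAIN rather than blocking because it
 * passes noblock == B_TRUE to kcf_rnd_get_bytes().
 */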