/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_get_pseudo_bytes() and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache, which is
 * used to satisfy requests for high quality random numbers (/dev/random).
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per PRNG key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;
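/*
 * A note on how the extraction modes are used (see rnd_get_bytes()):
 *
 * BLOCKING_EXTRACT	blocking read(2) from /dev/random; waits until
 *			enough bytes accumulate in the pool.
 * NONBLOCK_EXTRACT	non-blocking read(2) from /dev/random; returns
 *			EAGAIN if the pool cannot satisfy the request.
 * ALWAYS_EXTRACT	internal reseeding of the per-CPU /dev/urandom
 *			generators; never blocks and never fails, reusing
 *			pool bytes when not enough fresh bytes are available.
 */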
/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error code in the /dev/urandom case and we can get an error using k-API
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE			20
#define	HMAC_BLOCK_SIZE			64
#define	HMAC_KEYSCHED			sha1keysched_t
#define	SET_ENCRYPT_KEY(k, s, ks)	hmac_key((k), (s), (ks))
#define	HMAC_ENCRYPT(ks, p, s, d)	hmac_encr((ks), (uint8_t *)(p), s, d)

/* HMAC-SHA1 "keyschedule" */
typedef struct sha1keysched_s {
	SHA1_CTX ictx;
	SHA1_CTX octx;
} sha1keysched_t;

/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer. Because the indices are
 * advanced with "& (RNDPOOLSIZE - 1)", RNDPOOLSIZE must be a power of two.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;			/* Number of bytes in the cache */

static kmutex_t rndpool_lock;		/* protects r/w accesses to the cache, */
					/* and the global variables */
static kcondvar_t rndpool_read_cv;	/* serializes poll/read syscalls */
static int num_waiters;			/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines();
static void hmac_key(uint8_t *, size_t, void *);
static void hmac_encr(void *, uint8_t *, size_t, uint8_t *);


void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable times: the high
	 * resolution time since boot, and the current time of day. This
	 * is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);

		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}
/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}

/* Boot-time tunable for experimentation. */
int kcf_limit_hwrng = 1;


/*
 * This routine is called for blocking reads.
 *
 * The argument from_user_api indicates whether the caller is
 * from userland coming via the /dev/random driver.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers,
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t from_user_api,
    boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt++;

		/*
		 * Typically a hardware RNG is a multi-purpose
		 * crypto card and hence we do not want to overload the card
		 * just for random numbers. The following check is to prevent
		 * a user process from hogging the hardware RNG. Note that we
		 * still use the hardware RNG from the periodically run
		 * taskq thread.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && from_user_api &&
		    kcf_limit_hwrng == 1) {
			ASSERT(is_taskq_thr == B_FALSE);
			goto try_next;
		}

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}
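/*
 * A note on the retry pattern used above and below: each call to
 * kcf_get_mech_provider() returns a provider that is not already on
 * 'list', so appending a failed (or skipped) provider to the list and
 * looping effectively fails over to the next capable provider until
 * none remain.
 */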
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes
 * NONBLOCK_EXTRACT and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len, boolean_t from_user_api)
{
	int rv, blen, total_bytes;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {

		prov_cnt++;

		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/* See comments in rngprov_getbytes() */
			if (from_user_api && kcf_limit_hwrng == 1)
				goto try_next;

			/*
			 * We have to allocate a buffer here as we cannot
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_FALSE, B_TRUE);
}
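/*
 * Entropy collection pipeline, for reference: the periodic timeout
 * (rnd_handler() below) dispatches rngprov_task() on the system taskq,
 * with the requested byte count encoded in the argument pointer;
 * rngprov_getbytes() then cycles through all providers and mixes their
 * output into the pool via rndc_addbytes().
 */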
/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how,
    boolean_t from_user_api)
{
	int bytes;
	size_t got;

	ASSERT(mutex_owned(&rndpool_lock));

	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, from_user_api,
		    B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len,
		    from_user_api)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we cannot block.
			 * This only happens in the case of /dev/urandom,
			 * which runs an additional generation algorithm.
			 * So, there is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock,
    boolean_t from_user_api)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how, from_user_api)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	RND_CPU_CACHE_SIZE*5
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	(sizeof (kmutex_t) + 3*sizeof (uint8_t *) + sizeof (HMAC_KEYSCHED) + \
	sizeof (uint64_t) + 3*sizeof (uint32_t) + sizeof (rnd_stats_t)))
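/*
 * For example, with 64-byte cache lines the padding target is
 * RND_CPU_PAD_SIZE = 64 * 5 = 320 bytes; RND_CPU_PAD is whatever is
 * left of those 320 bytes after the real rndmag_t members below, so
 * (modulo compiler padding) each magazine spans five cache lines'
 * worth of bytes, keeping different CPUs' hot data apart.
 */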
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	HMAC_KEYSCHED	rm_ks;		/* seed */
	uint64_t	rm_counter;	/* rotating counter for extracting */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_t;

/*
 * Generate random bytes for /dev/urandom by encrypting a
 * rotating counter with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len;
	int nblock, size;
	uint32_t oblocks;
	uint8_t digest[HASHSIZE];

	ASSERT(mutex_owned(&rmp->rm_lock));

	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_oblocks += nblock;
	oblocks = rmp->rm_oblocks;

	do {
		if (oblocks >= rmp->rm_olimit) {
			hrtime_t timestamp;
			uint8_t key[HMAC_KEYSIZE];

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_olimit += rmp->rm_ofuzz;
					rmp->rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes(key, HMAC_KEYSIZE,
			    ALWAYS_EXTRACT, B_FALSE);

			/* Set up key */
			SET_ENCRYPT_KEY(key, HMAC_KEYSIZE, &rmp->rm_ks);

			/* Get new counter value by encrypting timestamp */
			timestamp = gethrtime();
			HMAC_ENCRYPT(&rmp->rm_ks, &timestamp,
			    sizeof (timestamp), digest);
			rmp->rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_ofuzz = PRNG_MAXOBLOCKS/4;
			bcopy(digest, &rmp->rm_counter, sizeof (uint64_t));
			oblocks = 0;
			rmp->rm_oblocks = nblock;
		}
punt:
		/* Hash counter to produce prn stream */
		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), ptr);
		} else {
			size = min(bytes, HASHSIZE);
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), digest);
			bcopy(digest, ptr, size);
		}
		ptr += size;
		bytes -= size;
		rmp->rm_counter++;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	mutex_exit(&rmp->rm_lock);
	return (0);
}
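/*
 * In other words, the /dev/urandom stream is produced CTR-style:
 * output block i is HMAC-SHA1(K, counter + i), where K is rekeyed from
 * the entropy pool at most every PRNG_MAXOBLOCKS blocks and the counter
 * is re-derived from an HMAC of a high-resolution timestamp at each
 * rekey.
 */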
/*
 * Per-CPU Random magazines.
 */
static rndmag_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;

/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t rndmag_threshold = 2560;
size_t rndbuf_len = 5120;
size_t rndmag_size = 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_lock);

		/*
		 * Big requests bypass the buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_eptr) {
			rmp->rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_rptr = rmp->rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_buffer,
		    rndbuf_len);
	}
}

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes above on the first call for
 * each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_t *rmp;
	int i;

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_t) * random_max_ncpus, KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_buffer = buf;
		rmp->rm_eptr = buf + rndbuf_len;
		rmp->rm_rptr = buf + rndbuf_len;
		rmp->rm_oblocks = 1;
	}
}
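/*
 * Worked example for the timeout computation below: the pool byte at
 * findex contributes bits 12-19 of a microsecond delay, so ut ranges
 * from 500000 us (byte 0x00) to 500000 + 0xFF000 = 1544480 us (byte
 * 0xFF). With TIMEOUT_INTERVAL == 5, the timer therefore fires every
 * 2.5 to ~7.72 seconds.
 */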
void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id)
		rngmech_type = crypto_mech2id(SUN_RANDOM);

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading the first 32 bits from the buffer here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}

/*
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(int anyyet, short *reventsp, struct pollhead **phpp)
{
	/*
	 * Sampling of rnbyte_cnt is an atomic
	 * operation. Hence we do not need any locking.
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES) {
		*reventsp |= (POLLIN | POLLRDNORM);
	} else {
		*reventsp = 0;
		if (!anyyet)
			*phpp = &rnd_pollhead;
	}
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	if (len > 0) {
		(void) taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP);
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. This is
	 * acceptable since the blocking will eventually end, after the
	 * timeout has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

/* Hashing functions */
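/*
 * For reference, RFC 2104 defines
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * hmac_key() below precomputes the two partial SHA1 contexts (key XOR
 * ipad, key XOR opad) once per rekey, so that hmac_encr() only has to
 * finish the inner and outer hashes for each message.
 */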
static void
hmac_key(uint8_t *key, size_t keylen, void *buf)
{
	uint32_t *ip, *op;
	uint32_t ipad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	uint32_t opad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	HASH_CTX *icontext, *ocontext;
	int i;
	int nints;

	icontext = buf;
	ocontext = (SHA1_CTX *)((uint8_t *)buf + sizeof (HASH_CTX));

	bzero((uchar_t *)ipad, HMAC_BLOCK_SIZE);
	bzero((uchar_t *)opad, HMAC_BLOCK_SIZE);
	bcopy(key, (uchar_t *)ipad, keylen);
	bcopy(key, (uchar_t *)opad, keylen);

	/*
	 * XOR key with ipad (0x36) and opad (0x5c) as defined
	 * in RFC 2104.
	 */
	ip = ipad;
	op = opad;
	nints = HMAC_BLOCK_SIZE/sizeof (uint32_t);

	for (i = 0; i < nints; i++) {
		ip[i] ^= 0x36363636;
		op[i] ^= 0x5c5c5c5c;
	}

	/* Perform hash with ipad */
	HashInit(icontext);
	HashUpdate(icontext, (uchar_t *)ipad, HMAC_BLOCK_SIZE);

	/* Perform hash with opad */
	HashInit(ocontext);
	HashUpdate(ocontext, (uchar_t *)opad, HMAC_BLOCK_SIZE);
}

static void
hmac_encr(void *ctx, uint8_t *ptr, size_t len, uint8_t *digest)
{
	HASH_CTX *saved_contexts;
	HASH_CTX icontext;
	HASH_CTX ocontext;

	saved_contexts = (HASH_CTX *)ctx;
	icontext = saved_contexts[0];
	ocontext = saved_contexts[1];

	HashUpdate(&icontext, ptr, len);
	HashFinal(digest, &icontext);

	/*
	 * Perform Hash(K XOR OPAD, DIGEST), where DIGEST is the
	 * Hash(K XOR IPAD, DATA).
	 */
	HashUpdate(&ocontext, digest, HASHSIZE);
	HashFinal(digest, &ocontext);
}


static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. The mixing may be deferred by the provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately (CRYPTO_SEED_NOW).
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE, B_FALSE));
}
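/*
 * Usage sketch for the exported API (illustrative only; 'key' is a
 * hypothetical caller-side buffer). A caller can prefer high-quality
 * bytes and fall back to the PRNG stream when entropy is short:
 *
 *	uint8_t key[32];
 *
 *	if (random_get_bytes(key, sizeof (key)) == EAGAIN)
 *		(void) random_get_pseudo_bytes(key, sizeof (key));
 */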