/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_get_pseudo_bytes() and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy requests for high-quality random numbers (/dev/random).
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per prng key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use the SHA1
 * routines directly instead of the k-API because we cannot return an error
 * code on the /dev/urandom path, and a k-API call can fail if a mechanism
 * is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20
#define	HMAC_BLOCK_SIZE		64
#define	HMAC_KEYSCHED		sha1keysched_t
#define	SET_ENCRYPT_KEY(k, s, ks)	hmac_key((k), (s), (ks))
#define	HMAC_ENCRYPT(ks, p, s, d)	hmac_encr((ks), (uint8_t *)(p), s, d)

/* HMAC-SHA1 "keyschedule" */
typedef struct sha1keysched_s {
	SHA1_CTX	ictx;
	SHA1_CTX	octx;
} sha1keysched_t;
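/*
 * An illustrative sketch of how the bindings above are meant to be used
 * (hypothetical key/buf/buflen names, not part of the driver): producing
 * an HMAC-SHA1 digest of a buffer looks like
 *
 *	HMAC_KEYSCHED ks;
 *	uint8_t digest[HASHSIZE];
 *
 *	SET_ENCRYPT_KEY(key, HMAC_KEYSIZE, &ks);
 *	HMAC_ENCRYPT(&ks, buf, buflen, digest);
 *
 * Swapping in a different hash should only require redefining the
 * macros above.
 */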
/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;		/* Number of bytes in the cache */
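/*
 * A note on the index arithmetic used throughout this file (an
 * observation, not new behavior): RNDPOOLSIZE is a power of two, so an
 * index is advanced with a mask instead of a modulo, e.g.
 *
 *	rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
 *
 * which is equivalent to (rindex + 1) % RNDPOOLSIZE without a division.
 */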
static kmutex_t rndpool_lock;		/* protects r/w accesses to the */
					/* cache, and the global variables */
static kcondvar_t rndpool_read_cv;	/* serializes poll/read syscalls */
static int num_waiters;		/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines();
static void hmac_key(uint8_t *, size_t, void *);
static void hmac_encr(void *, uint8_t *, size_t, uint8_t *);


void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable times: the high
	 * resolution time since boot, and the current time of day. This
	 * is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		return (B_TRUE);
	} else
		return (B_FALSE);
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len)
{
	kcf_provider_desc_t *pd = NULL;
	kcf_req_params_t params;

	if (kcf_get_sw_prov(rngmech_type, &pd, B_FALSE) == CRYPTO_SUCCESS) {
		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_SEED,
		    pd->pd_sid, buf, len);
		(void) kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		KCF_PROV_REFRELE(pd);
	}
}

/* Boot-time tunable for experimentation. */
int kcf_limit_hwrng = 1;


/*
 * This routine is called for blocking reads.
 *
 * The argument from_user_api indicates whether the caller is
 * from userland coming via the /dev/random driver.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers,
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry with another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t from_user_api,
    boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt++;
		/*
		 * Typically a hardware RNG is a multi-purpose crypto card,
		 * and we do not want to overload the card just for random
		 * numbers. The following check prevents a user process
		 * from hogging the hardware RNG. Note that we still use
		 * the hardware RNG from the periodically run taskq thread.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && from_user_api &&
		    kcf_limit_hwrng == 1) {
			ASSERT(is_taskq_thr == B_FALSE);
			goto try_next;
		}

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		return (-1);
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}
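/*
 * A sketch of the asynchronous path used by rngprov_getbytes_nblk()
 * below for hardware providers: the request carries
 * cr_callback_func == notify_done and a private MINEXTRACTBYTES
 * buffer. If kcf_submit_request() returns CRYPTO_QUEUED, notify_done()
 * later mixes the generated bytes into the pool and frees the buffer;
 * on a synchronous return the caller copies what it needs, adds any
 * surplus to the pool, and frees the buffer itself.
 */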
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len, boolean_t from_user_api)
{
	int rv, blen, total_bytes;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/* See comments in rngprov_getbytes() */
			if (from_user_api && kcf_limit_hwrng == 1)
				goto try_next;

			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		return (-1);
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	if (rngprov_getbytes(tbuf, len, B_FALSE, B_TRUE) == -1) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
	}
}

/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how,
    boolean_t from_user_api)
{
	int bytes;
	size_t got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, from_user_api,
		    B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len,
		    from_user_api)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we can not block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock,
    boolean_t from_user_api)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how, from_user_api)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	RND_CPU_CACHE_SIZE*5
#define	RND_CPU_PAD (RND_CPU_PAD_SIZE - \
	(sizeof (kmutex_t) + 3*sizeof (uint8_t *) + sizeof (HMAC_KEYSCHED) + \
	sizeof (uint64_t) + 3*sizeof (uint32_t) + sizeof (rnd_stats_t)))
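/*
 * A worked reading of the arithmetic above (an observation, not new
 * behavior): RND_CPU_PAD_SIZE is 5 cache lines, i.e. 5 * 64 = 320
 * bytes. RND_CPU_PAD subtracts the combined size of the fields of
 * rndmag_t below, so sizeof (rndmag_t) comes out at 320 bytes (modulo
 * compiler padding) and each per-CPU magazine occupies its own group
 * of cache lines, avoiding false sharing between CPUs.
 */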
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	HMAC_KEYSCHED	rm_ks;		/* seed */
	uint64_t	rm_counter;	/* rotating counter for extracting */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_t;

/*
 * Generate random bytes for /dev/urandom by encrypting a
 * rotating counter with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases the lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len;
	int nblock, size;
	uint32_t oblocks;
	uint8_t digest[HASHSIZE];

	ASSERT(mutex_owned(&rmp->rm_lock));

	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_oblocks += nblock;
	oblocks = rmp->rm_oblocks;

	do {
		if (oblocks >= rmp->rm_olimit) {
			hrtime_t timestamp;
			uint8_t key[HMAC_KEYSIZE];

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without a rekey.
			 */
			if (rmp->rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_olimit += rmp->rm_ofuzz;
					rmp->rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes(key, HMAC_KEYSIZE,
			    ALWAYS_EXTRACT, B_FALSE);

			/* Set up key */
			SET_ENCRYPT_KEY(key, HMAC_KEYSIZE, &rmp->rm_ks);

			/* Get new counter value by encrypting timestamp */
			timestamp = gethrtime();
			HMAC_ENCRYPT(&rmp->rm_ks, &timestamp,
			    sizeof (timestamp), digest);
			rmp->rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_ofuzz = PRNG_MAXOBLOCKS/4;
			bcopy(digest, &rmp->rm_counter, sizeof (uint64_t));
			oblocks = 0;
			rmp->rm_oblocks = nblock;
		}
punt:
		/* Hash counter to produce prn stream */
		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), ptr);
		} else {
			size = min(bytes, HASHSIZE);
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), digest);
			bcopy(digest, ptr, size);
		}
		ptr += size;
		bytes -= size;
		rmp->rm_counter++;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	mutex_exit(&rmp->rm_lock);
	return (0);
}
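/*
 * In other words, between rekeys the loop above emits the stream
 *
 *	block[0] = HMAC-SHA1(K, counter)
 *	block[1] = HMAC-SHA1(K, counter + 1)
 *	...
 *
 * where K is read from the high-quality pool at most every
 * PRNG_MAXOBLOCKS output blocks (sooner when the pool lock can be
 * acquired without contention), and the initial counter is taken from
 * an HMAC of a high-resolution timestamp at each rekey.
 */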
/*
 * Per-CPU Random magazines.
 */
static rndmag_t *rndmag;
static uint8_t	*rndbuf;
static size_t	rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t	random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t	rndmag_threshold = 2560;
size_t	rndbuf_len = 5120;
size_t	rndmag_size = 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_lock);

		/*
		 * Big requests bypass the buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_eptr) {
			rmp->rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_rptr = rmp->rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present).
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_buffer,
		    rndbuf_len);
	}
}

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes() above on the first call
 * for each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_t *rmp;
	int i;

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_t) * random_max_ncpus, KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_buffer = buf;
		rmp->rm_eptr = buf + rndbuf_len;
		rmp->rm_rptr = buf + rndbuf_len;
		rmp->rm_oblocks = 1;
	}
}

void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id)
		rngmech_type = crypto_mech2id(SUN_RANDOM);

	/*
	 * The new timeout value is taken from the pool of random bytes.
	 * We're merely peeking at the first byte of the pool here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec
	 * and 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}
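/*
 * A worked example of the timeout arithmetic above (an observation,
 * not new behavior): rndpool[findex] is a single byte, so
 * (byte << 12) & 0xFF000 ranges over 0 .. 0xFF000 (1044480)
 * microseconds in 4096-microsecond steps. The multiplier ut is thus
 * 500000 .. 1544480 us, and with TIMEOUT_INTERVAL == 5 the handler
 * reruns every 2.5 .. 7.72 seconds.
 */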
/*
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(int anyyet, short *reventsp, struct pollhead **phpp)
{
	/*
	 * Sampling of rnbyte_cnt is an atomic
	 * operation. Hence we do not need any locking.
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES) {
		*reventsp |= (POLLIN | POLLRDNORM);
	} else {
		*reventsp = 0;
		if (!anyyet)
			*phpp = &rnd_pollhead;
	}
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (num_waiters > 0)
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	if (len > 0) {
		(void) taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP);
	} else if (!kcf_rngprov_check()) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. This is
	 * acceptable since the blocking will eventually end, after the
	 * timeout has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}
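/*
 * The two routines below implement HMAC as defined in RFC 2104:
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * hmac_key() precomputes the two partially applied hash contexts (with
 * K ^ ipad and K ^ opad already absorbed), so that hmac_encr() only
 * has to hash the message and then the inner digest.
 */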
/* Hashing functions */

static void
hmac_key(uint8_t *key, size_t keylen, void *buf)
{
	uint32_t *ip, *op;
	uint32_t ipad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	uint32_t opad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	HASH_CTX *icontext, *ocontext;
	int i;
	int nints;

	icontext = buf;
	ocontext = (SHA1_CTX *)((uint8_t *)buf + sizeof (HASH_CTX));

	bzero((uchar_t *)ipad, HMAC_BLOCK_SIZE);
	bzero((uchar_t *)opad, HMAC_BLOCK_SIZE);
	bcopy(key, (uchar_t *)ipad, keylen);
	bcopy(key, (uchar_t *)opad, keylen);

	/*
	 * XOR the key with ipad (0x36) and opad (0x5c) as defined
	 * in RFC 2104.
	 */
	ip = ipad;
	op = opad;
	nints = HMAC_BLOCK_SIZE/sizeof (uint32_t);

	for (i = 0; i < nints; i++) {
		ip[i] ^= 0x36363636;
		op[i] ^= 0x5c5c5c5c;
	}

	/* Perform hash with ipad */
	HashInit(icontext);
	HashUpdate(icontext, (uchar_t *)ipad, HMAC_BLOCK_SIZE);

	/* Perform hash with opad */
	HashInit(ocontext);
	HashUpdate(ocontext, (uchar_t *)opad, HMAC_BLOCK_SIZE);
}

static void
hmac_encr(void *ctx, uint8_t *ptr, size_t len, uint8_t *digest)
{
	HASH_CTX *saved_contexts;
	HASH_CTX icontext;
	HASH_CTX ocontext;

	saved_contexts = (HASH_CTX *)ctx;
	icontext = saved_contexts[0];
	ocontext = saved_contexts[1];

	HashUpdate(&icontext, ptr, len);
	HashFinal(digest, &icontext);

	/*
	 * Perform Hash(K XOR opad, DIGEST), where DIGEST is
	 * Hash(K XOR ipad, DATA).
	 */
	HashUpdate(&ocontext, digest, HASHSIZE);
	HashFinal(digest, &ocontext);
}
static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
/* ARGSUSED */
int
random_add_entropy(uint8_t *ptr, size_t len, uint16_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE, B_FALSE));
}
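/*
 * An illustrative sketch of a kernel consumer of the exported entry
 * points above (hypothetical, not part of this file):
 *
 *	uint8_t key[32];
 *
 *	if (random_get_bytes(key, sizeof (key)) == EAGAIN) {
 *		(void) random_get_pseudo_bytes(key, sizeof (key));
 *	}
 *
 * random_get_bytes() is non-blocking (it passes B_TRUE for noblock),
 * so a caller that cannot tolerate EAGAIN can fall back to
 * random_get_pseudo_bytes(), which always succeeds.
 */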