/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_get_pseudo_bytes() and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy requests for high-quality random numbers (/dev/random).
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_lock protects the per-CPU pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per PRNG key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error code in the /dev/urandom case and we can get an error using k-API
 * if a mechanism is disabled.
 */
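/*
 * Added note: with the definitions below, one generator output block is
 * HMAC-SHA1(key, counter), i.e. HASHSIZE (20) bytes at a time, and each
 * generator key is HMAC_KEYSIZE (20) bytes drawn from the pool.
 */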
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE			20
#define	HMAC_BLOCK_SIZE			64
#define	HMAC_KEYSCHED			sha1keysched_t
#define	SET_ENCRYPT_KEY(k, s, ks)	hmac_key((k), (s), (ks))
#define	HMAC_ENCRYPT(ks, p, s, d)	hmac_encr((ks), (uint8_t *)(p), s, d)

/* HMAC-SHA1 "keyschedule" */
typedef struct sha1keysched_s {
	SHA1_CTX ictx;
	SHA1_CTX octx;
} sha1keysched_t;

/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;			/* Number of bytes in the cache */

static kmutex_t rndpool_lock;		/* protects r/w accesses to the cache, */
					/* and the global variables */
static kcondvar_t rndpool_read_cv;	/* serializes poll/read syscalls */
static int num_waiters;			/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines();
static void hmac_key(uint8_t *, size_t, void *);
static void hmac_encr(void *, uint8_t *, size_t, uint8_t *);


void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable values: the
	 * high-resolution time since boot, and the current time of day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		return (B_TRUE);
	} else
		return (B_FALSE);
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len)
{
	kcf_provider_desc_t *pd = NULL;
	kcf_req_params_t params;

	if (kcf_get_sw_prov(rngmech_type, &pd, B_FALSE) == CRYPTO_SUCCESS) {
		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_SEED,
		    pd->pd_sid, buf, len);
		(void) kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		KCF_PROV_REFRELE(pd);
	}
}

/* Boot-time tunable for experimentation. */
int kcf_limit_hwrng = 1;
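/*
 * Illustration (added, not from the original source): kcf_limit_hwrng is
 * a plain kernel global, so it can presumably be overridden at boot from
 * /etc/system; the module prefix below is an assumption about where this
 * file is built, not a documented tunable name.
 *
 *	set kcf:kcf_limit_hwrng = 0
 */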
/*
 * This routine is called for blocking reads.
 *
 * The argument 'from_user_api' indicates whether the caller is
 * from userland, coming via the /dev/random driver.
 *
 * The argument 'is_taskq_thr' indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers,
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t from_user_api,
    boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt++;
		/*
		 * Typically a hardware RNG is a multi-purpose
		 * crypto card and hence we do not want to overload the card
		 * just for random numbers. The following check is to prevent
		 * a user process from hogging the hardware RNG. Note that we
		 * still use the hardware RNG from the periodically run
		 * taskq thread.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && from_user_api &&
		    kcf_limit_hwrng == 1) {
			ASSERT(is_taskq_thr == B_FALSE);
			goto try_next;
		}

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		return (-1);
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}
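/*
 * Added note on the asynchronous path: when a request submitted by
 * rngprov_getbytes_nblk() below completes with CRYPTO_QUEUED, ownership
 * of the MINEXTRACTBYTES buffer passes to notify_done(), which mixes the
 * bytes into the pool on success and zeroes and frees the buffer in all
 * cases.
 */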
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes
 * NONBLOCK_EXTRACT and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len, boolean_t from_user_api)
{
	int rv, blen, total_bytes;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/* See comments in rngprov_getbytes() */
			if (from_user_api && kcf_limit_hwrng == 1)
				goto try_next;

			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		return (-1);
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	if (rngprov_getbytes(tbuf, len, B_FALSE, B_TRUE) == -1) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
	}
}
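/*
 * Illustrative calling pattern for rnd_get_bytes() below (a sketch
 * mirroring kcf_rnd_get_bytes(); not additional driver code). The caller
 * takes rndpool_lock so the common cache-hit case needs no extra lock
 * round trip, and the callee drops the lock on every path:
 *
 *	mutex_enter(&rndpool_lock);
 *	error = rnd_get_bytes(buf, buflen, BLOCKING_EXTRACT, B_FALSE);
 *	-- rndpool_lock has already been dropped here --
 */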
/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how,
    boolean_t from_user_api)
{
	int bytes;
	size_t got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, from_user_api,
		    B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len,
		    from_user_api)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we can not block.
			 * This only happens in the case of /dev/urandom, which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock,
    boolean_t from_user_api)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how, from_user_api)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	RND_CPU_CACHE_SIZE*5
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	(sizeof (kmutex_t) + 3*sizeof (uint8_t *) + sizeof (HMAC_KEYSCHED) + \
	sizeof (uint64_t) + 3*sizeof (uint32_t) + sizeof (rnd_stats_t)))
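/*
 * Added note on the arithmetic above: RND_CPU_PAD_SIZE is five cache
 * lines (5 * 64 = 320 bytes), and RND_CPU_PAD subtracts the combined
 * size of the other rndmag_t members, so sizeof (rndmag_t) comes out to
 * RND_CPU_PAD_SIZE and consecutive entries of the rndmag array never
 * share a cache line. The exact pad value depends on the sizes of
 * kmutex_t and rnd_stats_t for a given build.
 */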
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	HMAC_KEYSCHED	rm_ks;		/* seed */
	uint64_t	rm_counter;	/* rotating counter for extracting */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_t;

/*
 * Generate random bytes for /dev/urandom by encrypting a
 * rotating counter with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len;
	int nblock, size;
	uint32_t oblocks;
	uint8_t digest[HASHSIZE];

	ASSERT(mutex_owned(&rmp->rm_lock));

	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_oblocks += nblock;
	oblocks = rmp->rm_oblocks;

	do {
		if (oblocks >= rmp->rm_olimit) {
			hrtime_t timestamp;
			uint8_t key[HMAC_KEYSIZE];

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_olimit += rmp->rm_ofuzz;
					rmp->rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes(key, HMAC_KEYSIZE,
			    ALWAYS_EXTRACT, B_FALSE);

			/* Set up key */
			SET_ENCRYPT_KEY(key, HMAC_KEYSIZE, &rmp->rm_ks);

			/* Get new counter value by encrypting timestamp */
			timestamp = gethrtime();
			HMAC_ENCRYPT(&rmp->rm_ks, &timestamp,
			    sizeof (timestamp), digest);
			rmp->rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_ofuzz = PRNG_MAXOBLOCKS/4;
			bcopy(digest, &rmp->rm_counter, sizeof (uint64_t));
			oblocks = 0;
			rmp->rm_oblocks = nblock;
		}
punt:
		/* Hash counter to produce prn stream */
		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), ptr);
		} else {
			size = min(bytes, HASHSIZE);
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), digest);
			bcopy(digest, ptr, size);
		}
		ptr += size;
		bytes -= size;
		rmp->rm_counter++;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	mutex_exit(&rmp->rm_lock);
	return (0);
}
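/*
 * Added summary of the generator above (informal sketch, not driver code):
 *
 *	key     = HMAC-SHA1 key schedule over 20 bytes from the pool
 *	counter = first 64 bits of HMAC(key, gethrtime())
 *	output block i = HMAC(key, counter + i), 20 bytes each,
 *	    rekeying after at most PRNG_MAXOBLOCKS output blocks.
 */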
/*
 * Per-CPU Random magazines.
 */
static rndmag_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t rndmag_threshold = 32;
size_t rndbuf_len = 64;
size_t rndmag_size = 64;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_eptr) {
			rmp->rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_rptr = rmp->rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_buffer,
		    rndbuf_len);
	}
}

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes above on the first call for
 * each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_t *rmp;
	int i;

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_t) * random_max_ncpus, KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_buffer = buf;
		rmp->rm_eptr = buf + rndbuf_len;
		rmp->rm_rptr = buf + rndbuf_len;
		rmp->rm_oblocks = 1;
	}
}
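/*
 * Added worked example of the sizing above, using the default tunables:
 * rndbuf_len = 64 rounds up to 80 (a multiple of HASHSIZE, 20);
 * rndmag_size (64) is then raised to 80 and rounded up to 128 (a
 * multiple of RND_CPU_CACHE_SIZE). Each CPU thus gets a 128-byte slice
 * of rndbuf, of which the first 80 bytes are used.
 */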
void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id)
		rngmech_type = crypto_mech2id(SUN_RANDOM);

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading the first byte from the buffer here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}

/*
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(int anyyet, short *reventsp, struct pollhead **phpp)
{
	/*
	 * Sampling of rnbyte_cnt is an atomic
	 * operation. Hence we do not need any locking.
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES) {
		*reventsp |= (POLLIN | POLLRDNORM);
	} else {
		*reventsp = 0;
		if (!anyyet)
			*phpp = &rnd_pollhead;
	}
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (num_waiters > 0)
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	if (len > 0) {
		(void) taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP);
	} else if (!kcf_rngprov_check()) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. It is acceptable
	 * since the blocking will eventually end, after the timeout
	 * has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

/* Hashing functions */

static void
hmac_key(uint8_t *key, size_t keylen, void *buf)
{
	uint32_t *ip, *op;
	uint32_t ipad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	uint32_t opad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	HASH_CTX *icontext, *ocontext;
	int i;
	int nints;

	icontext = buf;
	ocontext = (SHA1_CTX *)((uint8_t *)buf + sizeof (HASH_CTX));

	bzero((uchar_t *)ipad, HMAC_BLOCK_SIZE);
	bzero((uchar_t *)opad, HMAC_BLOCK_SIZE);
	bcopy(key, (uchar_t *)ipad, keylen);
	bcopy(key, (uchar_t *)opad, keylen);
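	/*
	 * Added note: the two contexts precompute the hashes of the padded
	 * keys, so each later HMAC computation (hmac_encr()) only clones
	 * them and hashes the message, the usual RFC 2104 optimization:
	 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
	 */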
	/*
	 * XOR key with ipad (0x36) and opad (0x5c) as defined
	 * in RFC 2104.
	 */
	ip = ipad;
	op = opad;
	nints = HMAC_BLOCK_SIZE/sizeof (uint32_t);

	for (i = 0; i < nints; i++) {
		ip[i] ^= 0x36363636;
		op[i] ^= 0x5c5c5c5c;
	}

	/* Perform hash with ipad */
	HashInit(icontext);
	HashUpdate(icontext, (uchar_t *)ipad, HMAC_BLOCK_SIZE);

	/* Perform hash with opad */
	HashInit(ocontext);
	HashUpdate(ocontext, (uchar_t *)opad, HMAC_BLOCK_SIZE);
}

static void
hmac_encr(void *ctx, uint8_t *ptr, size_t len, uint8_t *digest)
{
	HASH_CTX *saved_contexts;
	HASH_CTX icontext;
	HASH_CTX ocontext;

	saved_contexts = (HASH_CTX *)ctx;
	icontext = saved_contexts[0];
	ocontext = saved_contexts[1];

	HashUpdate(&icontext, ptr, len);
	HashFinal(digest, &icontext);

	/*
	 * Perform Hash(K XOR OPAD, DIGEST), where DIGEST is the
	 * Hash(K XOR IPAD, DATA).
	 */
	HashUpdate(&ocontext, digest, HASHSIZE);
	HashFinal(digest, &ocontext);
}


static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
/* ARGSUSED */
int
random_add_entropy(uint8_t *ptr, size_t len, uint16_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE, B_FALSE));
}
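
/*
 * Illustrative use of the exported API (an added documentation sketch,
 * not driver code; the buffer name and size are hypothetical):
 *
 *	uint8_t seed[20];
 *
 *	if (random_get_bytes(seed, sizeof (seed)) == EAGAIN) {
 *		-- not enough entropy; fall back to the PRNG --
 *		(void) random_get_pseudo_bytes(seed, sizeof (seed));
 *	}
 */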