/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_add_pseudo_entropy(), random_get_pseudo_bytes()
 * and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy requests for high-quality random numbers (/dev/random).
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses the
 * random bits in the cache as a seed. We create one pseudo-random generator
 * (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_mag.rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *	   It may be locked while a rmp->rm_mag.rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */
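
/*
 * Usage sketch (illustrative only, not part of this file): a kernel
 * consumer that prefers high-quality bytes but can fall back to the
 * pseudo-random generator. random_get_bytes() returns EAGAIN when not
 * enough entropy is available, while random_get_pseudo_bytes() is
 * documented below as always succeeding:
 *
 *	uint8_t seed[32];
 *
 *	if (random_get_bytes(seed, sizeof (seed)) != 0)
 *		(void) random_get_pseudo_bytes(seed, sizeof (seed));
 */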

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/ioctladmin.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#include <rng/fips_random.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per PRNG key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;
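
/*
 * NONBLOCK_EXTRACT and BLOCKING_EXTRACT correspond to non-blocking and
 * blocking reads of /dev/random. ALWAYS_EXTRACT serves internal callers
 * (the /dev/urandom rekey and FIPS initialization paths below) that must
 * be given bytes even when the pool is low on entropy.
 */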

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error code in /dev/urandom case and we can get an error using k-API
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20

/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
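/*
 * RNDPOOLSIZE is a power of two, so the indices wrap with a mask rather
 * than a modulus; e.g. with RNDPOOLSIZE == 1024,
 * (1023 + 1) & (RNDPOOLSIZE - 1) == 0.
 */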
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;		/* Number of bytes in the cache */

static kmutex_t rndpool_lock;	/* protects r/w accesses to the cache, */
				/* and the global variables */
static kcondvar_t rndpool_read_cv; /* serializes poll/read syscalls */
static int num_waiters;		/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
/* LINTED E_STATIC_UNUSED */
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;
static boolean_t rngprov_task_idle = B_TRUE;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines(void);
static void rnd_fips_discard_initial(void);
static void rnd_init2(void *);
static void rnd_schedule_timeout(void);

/*
 * Called from kcf:_init()
 */
void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable times: the
	 * high-resolution time since boot, and the current time of day.
	 * These are used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;

	rnd_alloc_magazines();

	(void) taskq_dispatch(system_taskq, rnd_init2, NULL, TQ_SLEEP);
}

/*
 * This is called via the system taskq, so that we can do further
 * initializations that have to wait until the kcf module itself is
 * done loading. (After kcf:_init returns.)
 */
static void
rnd_init2(void *unused)
{

	_NOTE(ARGUNUSED(unused));

	/*
	 * This will load a randomness provider; typically "swrand",
	 * but could be another provider if so configured.
	 */
	rngmech_type = crypto_mech2id(SUN_RANDOM);

	/* Update rng_prov_found etc. */
	(void) kcf_rngprov_check();

	/* FIPS 140-2 init. */
	rnd_fips_discard_initial();

	/* Start rnd_handler calls. */
	rnd_schedule_timeout();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}

/*
 * This routine is called for blocking reads.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}

/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, total_bytes;
	size_t blen;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we cannot
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_TRUE);
	rngprov_task_idle = B_TRUE;
}

/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how)
{
	size_t bytes;
	int got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we cannot block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	(RND_CPU_CACHE_SIZE * 6)
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	sizeof (rndmag_t))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[HASHSIZE/BYTES_IN_WORD];	/* FIPS XKEY */
	uint32_t	rm_seed[HASHSIZE/BYTES_IN_WORD]; /* seed for rekey */
	uint32_t	rm_previous[HASHSIZE/BYTES_IN_WORD]; /* prev random */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_pad_t;

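/*
 * A compile-time sanity check one could add here (a sketch; CTASSERT is
 * the static assertion from <sys/debug.h>). It holds as long as rndmag_t,
 * including any alignment padding, fits within RND_CPU_PAD_SIZE:
 *
 *	CTASSERT(sizeof (rndmag_pad_t) == RND_CPU_PAD_SIZE);
 */
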
/*
 * Generate random bytes for /dev/urandom by applying the
 * FIPS 186-2 algorithm with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
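/*
 * For reference, the per-block update that fips_random_inner() is expected
 * to perform is, in FIPS 186-2 Appendix 3.1 terms (a sketch, with b = 160
 * and G() built on SHA-1; rm_key is XKEY and rm_seed plays XSEED):
 *
 *	w = G(t, (XKEY + XSEED) mod 2^b)
 *	XKEY = (1 + XKEY + w) mod 2^b
 *
 * where w is the 160-bit output block copied out below as tempout.
 */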
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len, size;
	int nblock;
	uint32_t oblocks;
	uint32_t tempout[HASHSIZE/BYTES_IN_WORD];
	uint32_t seed[HASHSIZE/BYTES_IN_WORD];
	int i;
	hrtime_t timestamp;
	uint8_t *src, *dst;

	ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));

	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_mag.rm_oblocks += nblock;
	oblocks = rmp->rm_mag.rm_oblocks;

	do {
		if (oblocks >= rmp->rm_mag.rm_olimit) {

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_mag.rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_mag.rm_olimit +=
					    rmp->rm_mag.rm_ofuzz;
					rmp->rm_mag.rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
			    HMAC_KEYSIZE, ALWAYS_EXTRACT);

			rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS/4;
			oblocks = 0;
			rmp->rm_mag.rm_oblocks = nblock;
		}
punt:
		timestamp = gethrtime();

		src = (uint8_t *)&timestamp;
		dst = (uint8_t *)rmp->rm_mag.rm_seed;

		for (i = 0; i < HASHSIZE; i++) {
			dst[i] ^= src[i % sizeof (timestamp)];
		}

		bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);

		fips_random_inner(rmp->rm_mag.rm_key, tempout,
		    seed);

		/* Copy out at most one HASHSIZE block per iteration */
		size = min(bytes, HASHSIZE);

		/*
		 * FIPS 140-2: Continuous RNG test - each generation
		 * of an n-bit block shall be compared with the previously
		 * generated block. Test shall fail if any two compared
		 * n-bit blocks are equal.
		 */
		for (i = 0; i < HASHSIZE/BYTES_IN_WORD; i++) {
			if (tempout[i] != rmp->rm_mag.rm_previous[i])
				break;
		}
		if (i == HASHSIZE/BYTES_IN_WORD) {
			cmn_err(CE_WARN, "kcf_random: 160-bit random block "
			    "is identical to the previous one.\n");
			/* discard random bytes and return error */
			mutex_exit(&rmp->rm_mag.rm_lock);
			return (EIO);
		}

		bcopy(tempout, rmp->rm_mag.rm_previous,
		    HASHSIZE);

		bcopy(tempout, ptr, size);
		ptr += size;
		bytes -= size;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}

/*
 * Per-CPU Random magazines.
 */
static rndmag_pad_t *rndmag;
static uint8_t	*rndbuf;
static size_t	rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t	random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t	rndmag_threshold = 2560;
size_t	rndbuf_len = 5120;
size_t	rndmag_size = 1280;
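
/*
 * For example, such a tunable can be set from /etc/system at boot (a
 * sketch, assuming the usual module:variable syntax applies to this
 * kcf variable):
 *
 *	set kcf:rndbuf_len = 0x2800
 */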

int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present).
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
	}
}

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in rnd_get_pseudo_bytes above on the first call for each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_pad_t *rmp;
	int i;

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus,
	    KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_mag.rm_buffer = buf;
		rmp->rm_mag.rm_eptr = buf + rndbuf_len;
		rmp->rm_mag.rm_rptr = buf + rndbuf_len;
		rmp->rm_mag.rm_oblocks = 1;
	}
}

/*
 * FIPS 140-2: the first n-bit (n > 15) block generated
 * after power-up, initialization, or reset shall not
 * be used, but shall be saved for comparison.
 */
static void
rnd_fips_discard_initial(void)
{
	uint8_t discard_buf[HASHSIZE];
	rndmag_pad_t *rmp;
	int i;

	for (i = 0; i < random_max_ncpus; i++) {
		rmp = &rndmag[i];

		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes(discard_buf,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		bcopy(discard_buf, rmp->rm_mag.rm_previous,
		    HMAC_KEYSIZE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
	}
}

static void
rnd_schedule_timeout(void)
{
	clock_t ut;	/* time in microseconds */

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading one byte from the buffer here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
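	/*
	 * Worked example: if rndpool[findex] == 0x80, then
	 * ut = 500000 + (0x80 << 12) = 1024288 microseconds, and the
	 * timeout fires after about TIMEOUT_INTERVAL * 1.02 seconds.
	 */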
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}

/*
 * Called from the driver for a poll on /dev/random
 * . POLLOUT always succeeds.
 * . POLLIN and POLLRDNORM will block until a
 *   minimum amount of entropy is available.
 *
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	*reventsp = events & POLLOUT;

	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * Sampling of rnbyte_cnt is an atomic
		 * operation. Hence we do not need any locking.
		 */
		if (rnbyte_cnt >= MINEXTRACTBYTES)
			*reventsp |= (events & (POLLIN | POLLRDNORM));
	}

	if ((*reventsp == 0 && !anyyet) || (events & POLLET))
		*phpp = &rnd_pollhead;
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(8) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		/*
		 * Note: len has no relationship with how many bytes
		 * a poll thread needs.
		 */
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	/*
	 * Only one thread gets to set rngprov_task_idle at a given point
	 * of time and the order of the writes is defined. Also, it is OK
	 * if we read an older value of it and skip the dispatch once
	 * since we will get the correct value during the next time here.
	 * So, no locking is needed here.
	 */
	if (len > 0 && rngprov_task_idle) {
		rngprov_task_idle = B_FALSE;

		/*
		 * It is OK if taskq_dispatch fails here. We will retry
		 * the next time around. Meanwhile, a thread doing a
		 * read() will go to the provider directly, if the
		 * cache becomes empty.
		 */
		if (taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP | TQ_NOQUEUE) ==
		    TASKQID_INVALID) {
			rngprov_task_idle = B_TRUE;
		}
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. It is acceptable
	 * since the blocking will eventually end, after the timeout
	 * has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	rnd_schedule_timeout();
}

static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately.
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function is expected
 * to always succeed and return 0; the only failure mode is an EIO from
 * the FIPS 140-2 continuous test in rnd_generate_pseudo_bytes().
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
}

int
random_get_blocking_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_FALSE));
}
1109