/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */


/*
 * Niagara 2 Random Number Generator (RNG) driver
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <sys/ksynch.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/param.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/hsvc.h>
#include <sys/machsystm.h>
#include <sys/hypervisor_api.h>
#include <sys/n2rng.h>

static int	n2rng_attach(dev_info_t *, ddi_attach_cmd_t);
static int	n2rng_detach(dev_info_t *, ddi_detach_cmd_t);
static int	n2rng_suspend(n2rng_t *);
static int	n2rng_resume(n2rng_t *);
static uint64_t	sticks_per_usec(void);
u_longlong_t	gettick(void);
static int	n2rng_init_ctl(n2rng_t *);
static void	n2rng_uninit_ctl(n2rng_t *);
static int	n2rng_config(n2rng_t *);
static void	n2rng_config_task(void *targ);

/*
 * Device operations.
 */

static struct dev_ops devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	nodev,				/* devo_getinfo */
	nulldev,			/* devo_identify */
	nulldev,			/* devo_probe */
	n2rng_attach,			/* devo_attach */
	n2rng_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	NULL,				/* devo_cb_ops */
	NULL,				/* devo_bus_ops */
	ddi_power,			/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
	&mod_driverops,			/* drv_modops */
	"N2 RNG Driver",		/* drv_linkinfo */
	&devops,			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev */
	&modldrv,			/* ml_linkage */
	NULL
};

/*
 * Driver global soft state.
 */
static void *n2rng_softstate = NULL;

/*
 * Hypervisor NCS services information.
 */
static boolean_t ncs_hsvc_available = B_FALSE;

#define	NVERSIONS	2

/*
 * HV API versions supported by this driver.
 */
static hsvc_info_t ncs_hsvc[NVERSIONS] = {
	{ HSVC_REV_1, NULL, HSVC_GROUP_RNG, 2, 0, DRIVER },	/* v2.0 */
	{ HSVC_REV_1, NULL, HSVC_GROUP_RNG, 1, 0, DRIVER },	/* v1.0 */
};
int	ncs_version_index;	/* index into ncs_hsvc[] */

/*
 * DDI entry points.
 */
int
_init(void)
{
	int	rv;

	rv = ddi_soft_state_init(&n2rng_softstate, sizeof (n2rng_t), 1);
	if (rv != 0) {
		/* this should *never* happen! */
		return (rv);
	}

	if ((rv = mod_install(&modlinkage)) != 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
		return (rv);
	}

	return (0);
}

int
_fini(void)
{
	int	rv;

	rv = mod_remove(&modlinkage);
	if (rv == 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
	}

	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

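/*
 * n2rng_attach()
 *
 * Allocate soft state for instance 0, negotiate an NCS hypervisor API
 * version, initialize the control-domain structures, and dispatch a
 * taskq job to configure the RNG and register with KCF.  DDI_RESUME is
 * forwarded to n2rng_resume().
 */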
static int
n2rng_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	n2rng_t		*n2rng = NULL;
	int		instance;
	int		rv;
	int		version;
	uint64_t	ncs_minor_ver;

	instance = ddi_get_instance(dip);
	DBG1(NULL, DENTRY, "n2rng_attach called, instance %d", instance);
	/*
	 * Only instance 0 of n2rng driver is allowed.
	 */
	if (instance != 0) {
		n2rng_diperror(dip, "only one instance (0) allowed");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_RESUME:
		n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate,
		    instance);
		if (n2rng == NULL) {
			n2rng_diperror(dip, "no soft state in attach");
			return (DDI_FAILURE);
		}
		return (n2rng_resume(n2rng));

	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	rv = ddi_soft_state_zalloc(n2rng_softstate, instance);
	if (rv != DDI_SUCCESS) {
		n2rng_diperror(dip, "unable to allocate soft state");
		return (DDI_FAILURE);
	}
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	ASSERT(n2rng != NULL);
	n2rng->n_dip = dip;

	mutex_init(&n2rng->n_lock, NULL, MUTEX_DRIVER, NULL);
	n2rng->n_flags = 0;
	n2rng->n_timeout_id = 0;
	n2rng->n_sticks_per_usec = sticks_per_usec();

	/* Determine binding type */
	n2rng->n_binding_name = ddi_binding_name(dip);
	if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_N2,
	    strlen(N2RNG_BINDNAME_N2)) == 0) {
		/*
		 * Niagara 2
		 */
		n2rng->n_binding = N2RNG_CPU_N2;
	} else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_VF,
	    strlen(N2RNG_BINDNAME_VF)) == 0) {
		/*
		 * Victoria Falls
		 */
		n2rng->n_binding = N2RNG_CPU_VF;
	} else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_KT,
	    strlen(N2RNG_BINDNAME_KT)) == 0) {
		/*
		 * Rainbow Falls
		 */
		n2rng->n_binding = N2RNG_CPU_KT;
	} else {
		n2rng_diperror(dip,
		    "unable to determine n2rng (cpu) binding (%s)",
		    n2rng->n_binding_name);
		goto errorexit;
	}
	DBG1(n2rng, DCHATTY, "n2rng_attach: n2rng->n_binding_name = %s",
	    n2rng->n_binding_name);

	/* Negotiate HV api version number */
	for (version = 0; version < NVERSIONS; version++) {
		rv = hsvc_register(&ncs_hsvc[version], &ncs_minor_ver);
		if (rv == 0)
			break;

		DBG4(n2rng, DCHATTY, "n2rng_attach: grp: 0x%lx, maj: %ld, "
		    "min: %ld, errno: %d", ncs_hsvc[version].hsvc_group,
		    ncs_hsvc[version].hsvc_major,
		    ncs_hsvc[version].hsvc_minor, rv);
	}
	if (version == NVERSIONS) {
		for (version = 0; version < NVERSIONS; version++) {
			cmn_err(CE_WARN,
			    "%s: cannot negotiate hypervisor services "
			    "group: 0x%lx major: %ld minor: %ld errno: %d",
			    ncs_hsvc[version].hsvc_modname,
			    ncs_hsvc[version].hsvc_group,
			    ncs_hsvc[version].hsvc_major,
			    ncs_hsvc[version].hsvc_minor, rv);
		}
		goto errorexit;
	}
	ncs_version_index = version;
	ncs_hsvc_available = B_TRUE;
	DBG2(n2rng, DATTACH, "n2rng_attach: ncs api version (%ld.%ld)",
	    ncs_hsvc[ncs_version_index].hsvc_major, ncs_minor_ver);
	n2rng->n_hvapi_major_version = ncs_hsvc[ncs_version_index].hsvc_major;
	n2rng->n_hvapi_minor_version = (uint_t)ncs_minor_ver;

	/*
	 * Warn if we are not running a version 2.0 or later api on
	 * multiple-rng systems.
	 */
	if ((n2rng->n_binding != N2RNG_CPU_N2) &&
	    (n2rng->n_hvapi_major_version < 2)) {
		cmn_err(CE_NOTE, "n2rng: Incompatible hypervisor api "
		    "version %d.%d detected", n2rng->n_hvapi_major_version,
		    n2rng->n_hvapi_minor_version);
	}

	/* Initialize ctl structure if running in the control domain */
	if (n2rng_init_ctl(n2rng) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "n2rng: unable to initialize rng "
		    "control structures");
		goto errorexit;
	}

	/* Allocate single thread task queue for rng diags and registration */
	n2rng->n_taskq = ddi_taskq_create(dip, "n2rng_taskq", 1,
	    TASKQ_DEFAULTPRI, 0);

	if (n2rng->n_taskq == NULL) {
		n2rng_diperror(dip, "ddi_taskq_create() failed");
		goto errorexit;
	}

	/* Dispatch task to configure the RNG and register with KCF */
	if (ddi_taskq_dispatch(n2rng->n_taskq, n2rng_config_task,
	    (void *)n2rng, DDI_SLEEP) != DDI_SUCCESS) {
		n2rng_diperror(dip, "ddi_taskq_dispatch() failed");
		goto errorexit;
	}

	return (DDI_SUCCESS);

errorexit:
	/* Wait for pending config tasks to complete and delete the taskq */
	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	n2rng_uninit_ctl(n2rng);

	(void) n2rng_uninit(n2rng);

	if (ncs_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&ncs_hsvc[ncs_version_index]);
		ncs_hsvc_available = B_FALSE;
	}

	mutex_destroy(&n2rng->n_lock);
	ddi_soft_state_free(n2rng_softstate, instance);

	return (DDI_FAILURE);
}

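/*
 * n2rng_detach()
 *
 * Tear down in roughly the reverse order of attach: destroy the config
 * taskq, cancel any pending config retry timeout, release the
 * control-domain structures, unregister from KCF, and free the soft
 * state.  DDI_SUSPEND is forwarded to n2rng_suspend().
 */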
static int
n2rng_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	int		rv;
	n2rng_t		*n2rng;
	timeout_id_t	tid;

	instance = ddi_get_instance(dip);
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	if (n2rng == NULL) {
		n2rng_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_SUSPEND:
		return (n2rng_suspend(n2rng));
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/* Destroy task queue first to ensure configuration has completed */
	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	/* Untimeout pending config retry operations */
	mutex_enter(&n2rng->n_lock);
	tid = n2rng->n_timeout_id;
	n2rng->n_timeout_id = 0;
	mutex_exit(&n2rng->n_lock);
	if (tid) {
		DBG1(n2rng, DCHATTY, "n2rng_detach: untimeout pending retry "
		    "id = %x", tid);
		(void) untimeout(tid);
	}

	n2rng_uninit_ctl(n2rng);

	/* unregister with KCF---also tears down FIPS state */
	rv = n2rng_uninit(n2rng) ? DDI_FAILURE : DDI_SUCCESS;

	if (ncs_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&ncs_hsvc[ncs_version_index]);
		ncs_hsvc_available = B_FALSE;
	}

	mutex_destroy(&n2rng->n_lock);
	ddi_soft_state_free(n2rng_softstate, instance);

	return (rv);
}

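/*
 * n2rng_suspend()
 *
 * Unregister from KCF ahead of a suspend; FIPS state is torn down as a
 * side effect and is rebuilt when n2rng_resume() reconfigures the
 * device.
 */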
/*ARGSUSED*/
static int
n2rng_suspend(n2rng_t *n2rng)
{
	/* unregister with KCF---also tears down FIPS state */
	if (n2rng_uninit(n2rng) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "n2rng: unable to unregister from KCF");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

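/*
 * n2rng_resume()
 *
 * Reconfigure the RNG hardware and re-register with KCF.  The stick
 * frequency and the driver data structures are assumed to have
 * survived the suspension intact.
 */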
/*ARGSUSED*/
static int
n2rng_resume(n2rng_t *n2rng)
{
	/* Assume clock is same speed and all data structures are intact */

	/* Re-configure the RNG hardware and register with KCF */
	return (n2rng_config(n2rng));
}

/*
 * Map hypervisor error code to solaris errno. Only H_EOK, H_EIO,
 * H_EWOULDBLOCK, H_EBUSY, and H_ENOACCESS are meaningful to this
 * device. Any other error code (including H_ENORADDR and H_EBADALIGN)
 * is mapped to EINVAL.
 */
int
n2rng_herr2kerr(uint64_t hv_errcode)
{
	int	s_errcode;

	switch (hv_errcode) {
	case H_EWOULDBLOCK:
		s_errcode = EWOULDBLOCK;
		break;
	case H_EIO:
		s_errcode = EIO;
		break;
	case H_EBUSY:
		s_errcode = EBUSY;
		break;
	case H_EOK:
		s_errcode = 0;
		break;
	case H_ENOACCESS:
		s_errcode = EPERM;
		break;
	case H_ENORADDR:
	case H_EBADALIGN:
	default:
		s_errcode = EINVAL;
		break;
	}
	return (s_errcode);
}

/*
 * Waits approximately delay_sticks counts of the stick register.
 * Times shorter than one sys clock tick (10ms on most systems) are
 * done by busy waiting.
 */
void
cyclesleep(n2rng_t *n2rng, uint64_t delay_sticks)
{
	uint64_t	end_stick = gettick() + delay_sticks;
	int64_t		sticks_to_wait;
	clock_t		sys_ticks_to_wait;
	clock_t		usecs_to_wait;

	/*CONSTCOND*/
	while (1) {
		sticks_to_wait = end_stick - gettick();
		if (sticks_to_wait <= 0) {
			return;
		}

		usecs_to_wait = sticks_to_wait / n2rng->n_sticks_per_usec;
		sys_ticks_to_wait = drv_usectohz(usecs_to_wait);

		if (sys_ticks_to_wait > 0) {
			/* sleep */
			delay(sys_ticks_to_wait);
		} else if (usecs_to_wait > 0) {
			/* busy wait */
			drv_usecwait(usecs_to_wait);
		}
	}
}

static void
log_internal_errors(uint64_t hverr, char *fname)
{
	switch (hverr) {
	case H_EBADALIGN:
		cmn_err(CE_WARN, "n2rng: internal alignment problem");
		break;
	case H_ENORADDR:
		cmn_err(CE_WARN, "n2rng: internal invalid address");
		break;
	case H_ENOACCESS:
		cmn_err(CE_WARN, "n2rng: access failure");
		break;
	case H_EWOULDBLOCK:
		cmn_err(CE_WARN, "n2rng: hardware busy");
		break;
	default:
		cmn_err(CE_NOTE,
		    "n2rng: %s unexpectedly returned hverr %ld",
		    fname, hverr);
		break;
	}
}

/*
 * Collects a buffer full of bits, using the specified setup. numbytes
 * must be a multiple of 8. If a sub-operation fails with EIO (handle
 * mismatch), returns EIO. If collect_setupp is NULL, the current
 * setup is used. If exit_setupp is NULL, the control configuration
 * and state are not set at exit. WARNING: the buffer must be 8-byte
 * aligned and in contiguous physical addresses. Contiguousness is
 * not checked!
 */
int
n2rng_collect_diag_bits(n2rng_t *n2rng, int rngid,
    n2rng_setup_t *collect_setupp, void *buffer, int numbytes,
    n2rng_setup_t *exit_setupp, uint64_t exitstate)
{
	int		rv;
	int		override_rv = 0;
	uint64_t	hverr;
	int		i;
	uint64_t	tdelta;
	n2rng_setup_t	setupbuffer[2];
	n2rng_setup_t	*setupcontigp;
	uint64_t	setupphys;
	int		numchunks;
	boolean_t	rnglooping;
	int		busycount = 0;
	int		blockcount = 0;

	if (numbytes % sizeof (uint64_t)) {
		return (EINVAL);
	}

	if ((uint64_t)buffer % sizeof (uint64_t) != 0) {
		return (EINVAL);
	}

	numchunks = ((numbytes / sizeof (uint64_t)) + RNG_DIAG_CHUNK_SIZE - 1)
	    / RNG_DIAG_CHUNK_SIZE;
	/*
	 * Use setupbuffer[0] if it is contiguous, otherwise
	 * setupbuffer[1].
	 */
	setupcontigp = &setupbuffer[
	    CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1];
	setupphys = va_to_pa(setupcontigp);

	/*
	 * If a non-null collect_setupp pointer has been provided,
	 * push the specified setup into the hardware.
	 */
	if (collect_setupp != NULL) {
		/* copy the specified state to the aligned buffer */
		*setupcontigp = *collect_setupp;
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = n2rng_ctl_write(n2rng, rngid, setupphys,
			    CTL_STATE_HEALTHCHECK,
			    n2rng->n_ctl_data->n_watchdog_cycles, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
				rnglooping = B_FALSE;
				break;
			case H_EIO: /* control yanked from us */
			case H_ENOACCESS: /* We are not control domain */
				return (rv);
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(1): "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					return (rv);
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			case H_EBUSY:
				/*
				 * A control write is already in progress.
				 * Note: This shouldn't happen since
				 * n2rng_ctl_write() waits for the
				 * write to complete.
				 */
				if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(1): "
					    "exceeded busy count of %d",
					    RNG_MAX_BUSY_ATTEMPTS);
					return (rv);
				} else {
					delay(RNG_RETRY_BUSY_DELAY);
				}
				break;
			default:
				log_internal_errors(hverr, "hv_rng_ctl_write");
				override_rv = rv;
				goto restore_state;
			}
		} /* while (rnglooping) */
	} /* if (collect_setupp != NULL) */

	/* If the caller asks for some bytes, collect the data */
	if (numbytes > 0) {
		for (i = 0; i < numchunks; i++) {
			size_t thisnumbytes = (i == numchunks - 1) ?
			    numbytes - i * (RNG_DIAG_CHUNK_SIZE *
			    sizeof (uint64_t)) :
			    RNG_DIAG_CHUNK_SIZE * sizeof (uint64_t);

			/* try until we successfully read a word of data */
			rnglooping = B_TRUE;
			busycount = 0;
			blockcount = 0;
			while (rnglooping) {
				hverr = n2rng_data_read_diag(n2rng, rngid,
				    va_to_pa((uint64_t *)buffer +
				    RNG_DIAG_CHUNK_SIZE * i),
				    thisnumbytes, &tdelta);
				rv = n2rng_herr2kerr(hverr);
				switch (hverr) {
				case H_EOK:
					rnglooping = B_FALSE;
					break;
				case H_EIO:
				case H_ENOACCESS:
					return (rv);
				case H_EWOULDBLOCK:
					/* Data not available, try again */
					if (++blockcount >
					    RNG_MAX_BLOCK_ATTEMPTS) {
						DBG1(n2rng, DHEALTH,
						    "n2rng_collect_diag_bits"
						    "(2): exceeded block count"
						    " of %d",
						    RNG_MAX_BLOCK_ATTEMPTS);
						return (rv);
					} else {
						cyclesleep(n2rng, tdelta);
					}
					break;
				default:
					log_internal_errors(hverr,
					    "hv_rng_data_read_diag");
					override_rv = rv;
					goto restore_state;
				}
			} /* while (rnglooping) */
		} /* for */
	}

restore_state:

	/* restore the preferred configuration and set exit state */
	if (exit_setupp != NULL) {

		*setupcontigp = *exit_setupp;
		rnglooping = B_TRUE;
		busycount = 0;
		blockcount = 0;
		while (rnglooping) {
			hverr = n2rng_ctl_write(n2rng, rngid, setupphys,
			    exitstate, n2rng->n_ctl_data->n_watchdog_cycles,
			    &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
			case H_EIO: /* control yanked from us */
			case H_EINVAL: /* some external error, probably */
			case H_ENOACCESS: /* We are not control domain */
				rnglooping = B_FALSE;
				break;
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(3): "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					return (rv);
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			case H_EBUSY:
				/*
				 * A control write is already in progress.
				 * Note: This shouldn't happen since
				 * n2rng_ctl_write() waits for the
				 * write to complete.
				 */
				if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(3): "
					    "exceeded busy count of %d",
					    RNG_MAX_BUSY_ATTEMPTS);
					return (rv);
				} else {
					delay(RNG_RETRY_BUSY_DELAY);
				}
				break;
			default:
				rnglooping = B_FALSE;
				log_internal_errors(hverr, "hv_rng_ctl_write");
				break;
			}
		} /* while */
	} /* if */

	/*
	 * override_rv takes care of the case where we abort because
	 * of some error, but still want to restore the preferred state
	 * and return the first error, even if other errors occur.
	 */
	return (override_rv ? override_rv : rv);
}

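/*
 * n2rng_getentropy()
 *
 * Fill the buffer with random 64-bit words read via hv_rng_data_read(),
 * retrying on H_EIO (health check in progress, pre-2.0 API only) and
 * H_EWOULDBLOCK up to the configured attempt limits.  Returns 0 on
 * success or a mapped errno on failure.
 */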
int
n2rng_getentropy(n2rng_t *n2rng, void *buffer, size_t size)
{
	int		i, rv = 0;	/* so it works if size is zero */
	uint64_t	hverr;
	uint64_t	*buffer_w = (uint64_t *)buffer;
	int		num_w = size / sizeof (uint64_t);
	uint64_t	randval;
	uint64_t	randvalphys = va_to_pa(&randval);
	uint64_t	tdelta;
	int		failcount = 0;
	int		blockcount = 0;
	boolean_t	rnglooping;

	for (i = 0; i < num_w; i++) {
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = hv_rng_data_read(randvalphys, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
				buffer_w[i] = randval;
				failcount = 0;
				rnglooping = B_FALSE;
				break;
			case H_EIO:
				/*
				 * Either a health check is in progress, or
				 * the watchdog timer has expired while running
				 * hv api version 2.0 or higher with health
				 * checks enabled.
				 */
				if (n2rng->n_hvapi_major_version < 2) {
					/*
					 * A health check is in progress.
					 * Wait RNG_RETRY_HLCHK_USECS and fail
					 * after RNG_MAX_DATA_READ_ATTEMPTS
					 * failures.
					 */
					if (++failcount >
					    RNG_MAX_DATA_READ_ATTEMPTS) {
						DBG2(n2rng, DHEALTH,
						    "n2rng_getentropy: "
						    "exceeded EIO count of "
						    "%d on cpu %d",
						    RNG_MAX_DATA_READ_ATTEMPTS,
						    CPU->cpu_id);
						goto exitpoint;
					} else {
						delay(drv_usectohz
						    (RNG_RETRY_HLCHK_USECS));
					}
				} else {
					/*
					 * Just return the error. If a flurry of
					 * random data requests happen to occur
					 * during a health check, there are
					 * multiple levels of defense:
					 * - 2.0 HV provides random data pool
					 * - FIPS algorithm tolerates failures
					 * - Software failover
					 * - Automatic configuration retries
					 * - Hardware failover on some systems
					 */
					goto exitpoint;
				}
				break;
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_getentropy: "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					goto exitpoint;
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			default:
				log_internal_errors(hverr, "hv_rng_data_read");
				goto exitpoint;
			}
		} /* while */
	} /* for */

exitpoint:
	return (rv);
}

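/*
 * n2rng_ctl_read()
 *
 * Read the control registers of the specified rng, dispatching to the
 * v1 or v2 hypervisor call based on the negotiated API version.  For
 * v2 the write status is folded into the return value; v1 has no
 * watchdog delta, so *wdelta is set to zero.
 */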
uint64_t
n2rng_ctl_read(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa, uint64_t *state,
    uint64_t *tdelta, uint64_t *wdelta)
{
	uint64_t	rv;
	uint64_t	wstatus;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_ctl_read_v2(ctlregs_pa, (uint64_t)rngid, state,
		    tdelta, wdelta, &wstatus);
		if (rv == 0) {
			rv = wstatus;
		}
	} else {
		rv = hv_rng_ctl_read(ctlregs_pa, state, tdelta);
		*wdelta = 0;
	}

	return (rv);
}

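/*
 * n2rng_ctl_wait()
 *
 * Poll the v2 control-read interface until a pending control write on
 * the specified rng completes, sleeping between H_EWOULDBLOCK and
 * H_EBUSY retries and giving up after the configured attempt limits.
 */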
uint64_t
n2rng_ctl_wait(n2rng_t *n2rng, int rngid)
{
	uint64_t	state;
	uint64_t	tdelta;
	uint64_t	wdelta;
	uint64_t	wstatus;
	boolean_t	rnglooping = B_TRUE;
	uint64_t	rv;
	n2rng_setup_t	setupbuffer[2];
	n2rng_setup_t	*setupcontigp;
	uint64_t	setupphys;
	int		busycount = 0;
	int		blockcount = 0;

	/*
	 * Use setupbuffer[0] if it is contiguous, otherwise
	 * setupbuffer[1].
	 */
	setupcontigp = &setupbuffer[
	    CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1];
	setupphys = va_to_pa(setupcontigp);

	while (rnglooping) {
		rv = hv_rng_ctl_read_v2(setupphys, (uint64_t)rngid, &state,
		    &tdelta, &wdelta, &wstatus);
		switch (rv) {
		case H_EOK:
			rv = wstatus;
			rnglooping = B_FALSE;
			break;
		case H_EWOULDBLOCK:
			/* Data currently not available, try again */
			if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: "
				    "exceeded block count of %d",
				    RNG_MAX_BLOCK_ATTEMPTS);
				return (rv);
			} else {
				cyclesleep(n2rng, tdelta);
			}
			break;
		case H_EBUSY:
			/* Control write still pending, try again */
			if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: "
				    "exceeded busy count of %d",
				    RNG_MAX_BUSY_ATTEMPTS);
				return (rv);
			} else {
				delay(RNG_RETRY_BUSY_DELAY);
			}
			break;
		default:
			log_internal_errors(rv, "n2rng_ctl_wait");
			rnglooping = B_FALSE;
		}
	} /* while (rnglooping) */

	return (rv);
}

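/*
 * n2rng_ctl_write()
 *
 * Write a new state into the control registers of the specified rng.
 * On the v2 API the write completes asynchronously, so n2rng_ctl_wait()
 * is called to wait for it and *tdelta is set to the default accumulate
 * cycles; on v1 the hypervisor supplies the delta itself.
 */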
uint64_t
n2rng_ctl_write(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa,
    uint64_t newstate, uint64_t wtimeout, uint64_t *tdelta)
{
	uint64_t	rv;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_ctl_write_v2(ctlregs_pa, newstate, wtimeout,
		    (uint64_t)rngid);
		if (rv == H_EOK) {
			/* Wait for control registers to be written */
			rv = n2rng_ctl_wait(n2rng, rngid);
		}
		*tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES;
	} else {
		rv = hv_rng_ctl_write(ctlregs_pa, newstate, wtimeout, tdelta);
	}

	return (rv);
}

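/*
 * n2rng_data_read_diag()
 *
 * Read diagnostic data from the specified rng into the buffer at
 * physical address data_pa, selecting the v1 or v2 hypervisor call
 * based on the negotiated API version.  A zero tdelta from the v2 call
 * is replaced with the default accumulate cycles.
 */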
uint64_t
n2rng_data_read_diag(n2rng_t *n2rng, int rngid, uint64_t data_pa,
    size_t datalen, uint64_t *tdelta)
{
	uint64_t	rv;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_data_read_diag_v2(data_pa, datalen,
		    (uint64_t)rngid, tdelta);
		if (*tdelta == 0) {
			*tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES;
		}
	} else {
		rv = hv_rng_data_read_diag(data_pa, datalen, tdelta);
	}

	return (rv);
}

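/*
 * n2rng_check_ctl_access()
 *
 * Probe whether this domain has diagnostic control of the rng.  On the
 * v2 API a control read with an invalid rng ID and a null address is
 * attempted just to see whether it fails with an access error; on v1
 * the diagnostic control hypercall is used directly.
 */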
uint64_t
n2rng_check_ctl_access(n2rng_t *n2rng)
{
	uint64_t	rv;
	uint64_t	unused_64;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		/*
		 * Attempt to read control registers with invalid ID and data
		 * just to see if we get an access error
		 */
		rv = hv_rng_ctl_read_v2(NULL, N2RNG_INVALID_ID,
		    &unused_64, &unused_64, &unused_64, &unused_64);
	} else {
		rv = hv_rng_get_diag_control();
	}

	return (rv);
}

/*
 * n2rng_config_retry()
 *
 * Schedule a timed call to n2rng_config() if one is not already pending
 */
void
n2rng_config_retry(n2rng_t *n2rng, clock_t seconds)
{
	mutex_enter(&n2rng->n_lock);
	/* Check if a config retry is already pending */
	if (n2rng->n_timeout_id) {
		DBG1(n2rng, DCFG, "n2rng_config_retry: retry pending "
		    "id = %x", n2rng->n_timeout_id);
	} else {
		n2rng->n_timeout_id = timeout(n2rng_config_task,
		    (void *)n2rng, drv_usectohz(seconds * SECOND));
		DBG2(n2rng, DCFG, "n2rng_config_retry: retry scheduled in "
		    "%d seconds, id = %x", seconds, n2rng->n_timeout_id);
	}
	mutex_exit(&n2rng->n_lock);
}

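/*
 * sticks_per_usec()
 *
 * Estimate the stick counter frequency in ticks per microsecond by
 * sampling gettick() and gethrtime() across a short delay().
 */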
static uint64_t
sticks_per_usec(void)
{
	uint64_t	starttick = gettick();
	hrtime_t	starttime = gethrtime();
	uint64_t	endtick;
	hrtime_t	endtime;

	delay(2);

	endtick = gettick();
	endtime = gethrtime();

	return ((1000 * (endtick - starttick)) / (endtime - starttime));
}

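/*
 * n2rng_init_ctl()
 *
 * Attempt to gain diagnostic control of the rng.  If that fails with
 * EPERM the driver is running in a guest domain and no control state is
 * needed; otherwise allocate and initialize the control-domain data,
 * including the per-rng preferred configurations and the watchdog
 * timeout.
 */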
static int
n2rng_init_ctl(n2rng_t *n2rng)
{
	int		rv;
	int		hverr;
	rng_entry_t	*rng;
	int		rngid;
	int		blockcount = 0;

	n2rng->n_ctl_data = NULL;

	/* Attempt to gain diagnostic control */
	do {
		hverr = n2rng_check_ctl_access(n2rng);
		rv = n2rng_herr2kerr(hverr);
		if ((hverr == H_EWOULDBLOCK) &&
		    (++blockcount > RNG_MAX_BUSY_ATTEMPTS)) {
			DBG1(n2rng, DHEALTH, "n2rng_init_ctl: exceeded busy "
			    "count of %d", RNG_MAX_BUSY_ATTEMPTS);
			return (rv);
		} else {
			delay(RNG_RETRY_BUSY_DELAY);
		}
	} while (hverr == H_EWOULDBLOCK);

	/*
	 * If attempt fails with EPERM, the driver is not running in the
	 * control domain
	 */
	if (rv == EPERM) {
		DBG0(n2rng, DATTACH,
		    "n2rng_init_ctl: Running in guest domain");
		return (DDI_SUCCESS);
	}

	/* Allocate control structure only used in control domain */
	n2rng->n_ctl_data = kmem_alloc(sizeof (rng_ctl_data_t), KM_SLEEP);
	n2rng->n_ctl_data->n_num_rngs_online = 0;

	/*
	 * If running with an API version less than 2.0 default to one rng.
	 * Otherwise get number of rngs from device properties.
	 */
	if (n2rng->n_hvapi_major_version < 2) {
		n2rng->n_ctl_data->n_num_rngs = 1;
	} else {
		n2rng->n_ctl_data->n_num_rngs =
		    ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
		    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
		    N2RNG_PROP_NUM_UNITS, 0);
		if (n2rng->n_ctl_data->n_num_rngs == 0) {
			cmn_err(CE_WARN, "n2rng: %s property not found",
			    N2RNG_PROP_NUM_UNITS);
			return (DDI_FAILURE);
		}
	}

	/* Allocate space for all rng entries */
	n2rng->n_ctl_data->n_rngs =
	    kmem_zalloc(n2rng->n_ctl_data->n_num_rngs *
	    sizeof (rng_entry_t), KM_SLEEP);

	/* Get accumulate cycles from .conf file. */
	n2rng->n_ctl_data->n_accumulate_cycles =
	    ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "acc_cycles",
	    RNG_DEFAULT_ACCUMULATE_CYCLES);

	/* Get health check frequency from .conf file */
	n2rng->n_ctl_data->n_hc_secs = ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "hc_seconds",
	    RNG_DEFAULT_HC_SECS);

	/* API versions prior to 2.0 do not support health checks */
	if ((n2rng->n_hvapi_major_version < 2) &&
	    (n2rng->n_ctl_data->n_hc_secs > 0)) {
		cmn_err(CE_WARN, "n2rng: Hypervisor api "
		    "version %d.%d does not support health checks",
		    n2rng->n_hvapi_major_version,
		    n2rng->n_hvapi_minor_version);
		n2rng->n_ctl_data->n_hc_secs = 0;
	}

	/* Calculate watchdog timeout value */
	if (n2rng->n_ctl_data->n_hc_secs <= 0) {
		n2rng->n_ctl_data->n_watchdog_cycles = 0;
	} else {
		n2rng->n_ctl_data->n_watchdog_cycles =
		    ((uint64_t)(RNG_EXTRA_WATCHDOG_SECS) +
		    n2rng->n_ctl_data->n_hc_secs) *
		    n2rng->n_sticks_per_usec * 1000000;
	}

	/*
	 * Set some plausible state into the preferred configuration.
	 * The intent is that the health check will immediately overwrite it.
	 */
	for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs; rngid++) {

		rng = &n2rng->n_ctl_data->n_rngs[rngid];

		rng->n_preferred_config.ctlwds[0].word = 0;
		rng->n_preferred_config.ctlwds[0].fields.rnc_anlg_sel =
		    N2RNG_NOANALOGOUT;
		rng->n_preferred_config.ctlwds[0].fields.rnc_cnt =
		    RNG_DEFAULT_ACCUMULATE_CYCLES;
		rng->n_preferred_config.ctlwds[0].fields.rnc_mode =
		    RNG_MODE_NORMAL;
		rng->n_preferred_config.ctlwds[1].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[2].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[3].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[0].fields.rnc_vcoctl = 1;
		rng->n_preferred_config.ctlwds[0].fields.rnc_selbits = 1;
		rng->n_preferred_config.ctlwds[1].fields.rnc_vcoctl = 2;
		rng->n_preferred_config.ctlwds[1].fields.rnc_selbits = 2;
		rng->n_preferred_config.ctlwds[2].fields.rnc_vcoctl = 3;
		rng->n_preferred_config.ctlwds[2].fields.rnc_selbits = 4;
		rng->n_preferred_config.ctlwds[3].fields.rnc_vcoctl = 0;
		rng->n_preferred_config.ctlwds[3].fields.rnc_selbits = 7;
	}

	n2rng_setcontrol(n2rng);
	DBG2(n2rng, DATTACH,
	    "n2rng_init_ctl: Running in control domain with %d rng device%s",
	    n2rng->n_ctl_data->n_num_rngs,
	    (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s");
	DBG2(n2rng, DCFG,
	    "n2rng_init_ctl: n_sticks_per_usec = %ld, n_hc_secs = %d",
	    n2rng->n_sticks_per_usec,
	    n2rng->n_ctl_data->n_hc_secs);
	DBG2(n2rng, DCFG,
	    "n2rng_init_ctl: n_watchdog_cycles = %ld, "
	    "n_accumulate_cycles = %ld", n2rng->n_ctl_data->n_watchdog_cycles,
	    n2rng->n_ctl_data->n_accumulate_cycles);

	return (DDI_SUCCESS);
}

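/*
 * n2rng_uninit_ctl()
 *
 * Free the per-rng entries and the control-domain data allocated by
 * n2rng_init_ctl(), if any.
 */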
static void
n2rng_uninit_ctl(n2rng_t *n2rng)
{
	if (n2rng->n_ctl_data) {
		if (n2rng->n_ctl_data->n_num_rngs) {
			kmem_free(n2rng->n_ctl_data->n_rngs,
			    n2rng->n_ctl_data->n_num_rngs *
			    sizeof (rng_entry_t));
			n2rng->n_ctl_data->n_rngs = NULL;
			n2rng->n_ctl_data->n_num_rngs = 0;
		}
		kmem_free(n2rng->n_ctl_data, sizeof (rng_ctl_data_t));
		n2rng->n_ctl_data = NULL;
	}
}


/*
 * n2rng_config_test()
 *
 * Attempt to read random data to see if the rng is configured.
 */
int
n2rng_config_test(n2rng_t *n2rng)
{
	int		rv = 0;
	uint64_t	hverr;
	uint64_t	randval = 0;
	uint64_t	randvalphys = va_to_pa(&randval);
	uint64_t	tdelta;
	int		failcount = 0;
	int		blockcount = 0;
	boolean_t	rnglooping = B_TRUE;

	while (rnglooping) {
		hverr = hv_rng_data_read(randvalphys, &tdelta);
		rv = n2rng_herr2kerr(hverr);
		switch (hverr) {
		case H_EOK:
			failcount = 0;
			rnglooping = B_FALSE;
			break;
		case H_EIO:
			/*
			 * A health check is in progress.
			 * Wait RNG_RETRY_HLCHK_USECS and fail
			 * after RNG_MAX_DATA_READ_ATTEMPTS
			 * failures.
			 */
			if (++failcount > RNG_MAX_DATA_READ_ATTEMPTS) {
				goto exitpoint;
			} else {
				delay(drv_usectohz(RNG_RETRY_HLCHK_USECS));
			}
			break;
		case H_EWOULDBLOCK:
			/* Data currently not available, try again */
			if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_config_test: "
				    "exceeded block count of %d",
				    RNG_MAX_BLOCK_ATTEMPTS);
				goto exitpoint;
			} else {
				cyclesleep(n2rng, tdelta);
			}
			break;
		case H_ENOACCESS:
			/* An rng error has occurred during health check */
			goto exitpoint;
		default:
			log_internal_errors(hverr, "hv_rng_data_read");
			goto exitpoint;
		}
	} /* while */

exitpoint:
	return (rv);
}

/*
 * n2rng_config()
 *
 * Run health check on the RNG hardware
 * Configure the RNG hardware
 * Register with crypto framework
 */
static int
n2rng_config(n2rng_t *n2rng)
{
	int		rv;
	rng_entry_t	*rng;
	int		rngid;

	/*
	 * Run health checks and configure rngs if running in control domain,
	 * otherwise just check if at least one rng is available.
	 */
	if (n2rng_iscontrol(n2rng)) {

		for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs;
		    rngid++) {

			rng = &n2rng->n_ctl_data->n_rngs[rngid];

			/* Only test rngs that have not already failed */
			if (rng->n_rng_state == CTL_STATE_ERROR) {
				continue;
			}

			if ((n2rng->n_binding == N2RNG_CPU_VF) &&
			    (n2rng->n_hvapi_major_version < 2)) {
				/*
				 * Since api versions prior to 2.0 do not
				 * support multiple rngs, bind to the current
				 * processor for the entire health check
				 * process.
				 */
				thread_affinity_set(curthread, CPU_CURRENT);
				DBG1(n2rng, DCFG, "n2rng_config: "
				    "Configuring single rng from cpu %d",
				    CPU->cpu_id);
				rv = n2rng_do_health_check(n2rng, rngid);
				thread_affinity_clear(curthread);
			} else {
				rv = n2rng_do_health_check(n2rng, rngid);
			}

			switch (rv) {
			case 0:
				/*
				 * Successful, increment online count if
				 * necessary
				 */
				DBG1(n2rng, DCFG, "n2rng_config: rng(%d) "
				    "passed health checks", rngid);
				if (rng->n_rng_state != CTL_STATE_CONFIGURED) {
					rng->n_rng_state =
					    CTL_STATE_CONFIGURED;
					n2rng->n_ctl_data->n_num_rngs_online++;
				}
				break;
			default:
				/*
				 * Health checks failed, decrement online
				 * count if necessary
				 */
				cmn_err(CE_WARN, "n2rng: rng(%d) "
				    "failed health checks", rngid);
				if (rng->n_rng_state == CTL_STATE_CONFIGURED) {
					n2rng->n_ctl_data->n_num_rngs_online--;
				}
				rng->n_rng_state = CTL_STATE_ERROR;
				break;
			}
		}
		DBG2(n2rng, DCFG, "n2rng_config: %d rng%s online",
		    n2rng->n_ctl_data->n_num_rngs_online,
		    (n2rng->n_ctl_data->n_num_rngs_online == 1) ? "" : "s");

		/* Check if all rngs have failed */
		if (n2rng->n_ctl_data->n_num_rngs_online == 0) {
			cmn_err(CE_WARN, "n2rng: %d RNG device%s failed",
			    n2rng->n_ctl_data->n_num_rngs,
			    (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s");
			goto errorexit;
		} else {
			n2rng_setconfigured(n2rng);
		}
	} else {
		/* Running in guest domain, just check if rng is configured */
		rv = n2rng_config_test(n2rng);
		switch (rv) {
		case 0:
			n2rng_setconfigured(n2rng);
			break;
		case EIO:
			/* Don't set configured to force a retry */
			break;
		default:
			goto errorexit;
		}
	}

	/*
	 * Initialize FIPS state and register with KCF if we have at least one
	 * RNG configured. Otherwise schedule a retry if all rngs have not
	 * failed.
	 */
	if (n2rng_isconfigured(n2rng)) {

		if (n2rng_init(n2rng) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "n2rng: unable to register with KCF");
			goto errorexit;
		}

		/*
		 * Schedule a retry if running in the control domain and a
		 * health check time has been specified.
		 */
		if (n2rng_iscontrol(n2rng) &&
		    (n2rng->n_ctl_data->n_hc_secs > 0)) {
			n2rng_config_retry(n2rng,
			    n2rng->n_ctl_data->n_hc_secs);
		}
	} else if (!n2rng_isfailed(n2rng)) {
		/* Schedule a retry if one is not already pending */
		n2rng_config_retry(n2rng, RNG_CFG_RETRY_SECS);
	}
	return (DDI_SUCCESS);

errorexit:
	/* Unregister from KCF if we are registered */
	(void) n2rng_unregister_provider(n2rng);
	n2rng_setfailed(n2rng);
	cmn_err(CE_WARN, "n2rng: hardware failure detected");
	return (DDI_FAILURE);
}

/*
 * n2rng_config_task()
 *
 * Call n2rng_config() from the task queue or after a timeout, ignore result.
 */
static void
n2rng_config_task(void *targ)
{
	n2rng_t *n2rng = (n2rng_t *)targ;

	mutex_enter(&n2rng->n_lock);
	n2rng->n_timeout_id = 0;
	mutex_exit(&n2rng->n_lock);
	(void) n2rng_config(n2rng);
}