/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Niagara 2 Random Number Generator (RNG) driver
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <sys/ksynch.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/param.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/hsvc.h>
#include <sys/machsystm.h>
#include <sys/hypervisor_api.h>
#include <sys/n2rng.h>

static int	n2rng_attach(dev_info_t *, ddi_attach_cmd_t);
static int	n2rng_detach(dev_info_t *, ddi_detach_cmd_t);
static int	n2rng_suspend(n2rng_t *);
static int	n2rng_resume(n2rng_t *);
static uint64_t sticks_per_usec(void);
u_longlong_t	gettick(void);
static int	n2rng_init_ctl(n2rng_t *);
static void	n2rng_uninit_ctl(n2rng_t *);
static int	n2rng_config(n2rng_t *);
static void	n2rng_config_task(void *targ);

/*
 * Device operations.
 */

static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	n2rng_attach,		/* devo_attach */
	n2rng_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
	&mod_driverops,			/* drv_modops */
	"N2 RNG Driver",		/* drv_linkinfo */
	&devops,			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev */
	&modldrv,			/* ml_linkage */
	NULL
};

/*
 * Driver global soft state.
 */
static void	*n2rng_softstate = NULL;

/*
 * Hypervisor NCS services information.
 */
static boolean_t ncs_hsvc_available = B_FALSE;

#define	NVERSIONS	2

/*
 * HV API versions supported by this driver.
 */
static hsvc_info_t ncs_hsvc[NVERSIONS] = {
	{ HSVC_REV_1, NULL, HSVC_GROUP_RNG, 2, 0, DRIVER },	/* v2.0 */
	{ HSVC_REV_1, NULL, HSVC_GROUP_RNG, 1, 0, DRIVER },	/* v1.0 */
};
int	ncs_version_index;	/* index into ncs_hsvc[] */

/*
 * DDI entry points.
 */
int
_init(void)
{
	int	rv;

	rv = ddi_soft_state_init(&n2rng_softstate, sizeof (n2rng_t), 1);
	if (rv != 0) {
		/* this should *never* happen! */
		return (rv);
	}

	if ((rv = mod_install(&modlinkage)) != 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
		return (rv);
	}

	return (0);
}

int
_fini(void)
{
	int	rv;

	rv = mod_remove(&modlinkage);
	if (rv == 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
	}

	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

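/*
 * n2rng_attach()
 *
 * Allocate and initialize the soft state, negotiate an NCS hypervisor
 * API version, set up the control structures if running in the control
 * domain, and dispatch a taskq job to configure the RNG hardware and
 * register with KCF.
 */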
static int
n2rng_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	n2rng_t		*n2rng = NULL;
	int		instance;
	int		rv;
	int		version;
	uint64_t	ncs_minor_ver;

	instance = ddi_get_instance(dip);
	DBG1(NULL, DENTRY, "n2rng_attach called, instance %d", instance);
	/*
	 * Only instance 0 of the n2rng driver is allowed.
	 */
	if (instance != 0) {
		n2rng_diperror(dip, "only one instance (0) allowed");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_RESUME:
		n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate,
		    instance);
		if (n2rng == NULL) {
			n2rng_diperror(dip, "no soft state in attach");
			return (DDI_FAILURE);
		}
		return (n2rng_resume(n2rng));

	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	rv = ddi_soft_state_zalloc(n2rng_softstate, instance);
	if (rv != DDI_SUCCESS) {
		n2rng_diperror(dip, "unable to allocate soft state");
		return (DDI_FAILURE);
	}
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	ASSERT(n2rng != NULL);
	n2rng->n_dip = dip;

	mutex_init(&n2rng->n_lock, NULL, MUTEX_DRIVER, NULL);
	n2rng->n_flags = 0;
	n2rng->n_timeout_id = 0;
	n2rng->n_sticks_per_usec = sticks_per_usec();

	/* Determine binding type */
	n2rng->n_binding_name = ddi_binding_name(dip);
	if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_N2,
	    strlen(N2RNG_BINDNAME_N2)) == 0) {
		/*
		 * Niagara 2
		 */
		n2rng->n_binding = N2RNG_CPU_N2;
	} else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_VF,
	    strlen(N2RNG_BINDNAME_VF)) == 0) {
		/*
		 * Victoria Falls
		 */
		n2rng->n_binding = N2RNG_CPU_VF;
	} else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_KT,
	    strlen(N2RNG_BINDNAME_KT)) == 0) {
		/*
		 * Rainbow Falls
		 */
		n2rng->n_binding = N2RNG_CPU_KT;
	} else {
		n2rng_diperror(dip,
		    "unable to determine n2rng (cpu) binding (%s)",
		    n2rng->n_binding_name);
		goto errorexit;
	}
	DBG1(n2rng, DCHATTY, "n2rng_attach: n2rng->n_binding_name = %s",
	    n2rng->n_binding_name);

	/* Negotiate HV api version number */
	for (version = 0; version < NVERSIONS; version++) {
		rv = hsvc_register(&ncs_hsvc[version], &ncs_minor_ver);
		if (rv == 0)
			break;

		DBG4(n2rng, DCHATTY, "n2rng_attach: grp: 0x%lx, maj: %ld, "
		    "min: %ld, errno: %d", ncs_hsvc[version].hsvc_group,
		    ncs_hsvc[version].hsvc_major,
		    ncs_hsvc[version].hsvc_minor, rv);
	}
	if (version == NVERSIONS) {
		for (version = 0; version < NVERSIONS; version++) {
			cmn_err(CE_WARN,
			    "%s: cannot negotiate hypervisor services "
			    "group: 0x%lx major: %ld minor: %ld errno: %d",
			    ncs_hsvc[version].hsvc_modname,
			    ncs_hsvc[version].hsvc_group,
			    ncs_hsvc[version].hsvc_major,
			    ncs_hsvc[version].hsvc_minor, rv);
		}
		goto errorexit;
	}
	ncs_version_index = version;
	ncs_hsvc_available = B_TRUE;
	DBG2(n2rng, DATTACH, "n2rng_attach: ncs api version (%ld.%ld)",
	    ncs_hsvc[ncs_version_index].hsvc_major, ncs_minor_ver);
	n2rng->n_hvapi_major_version = ncs_hsvc[ncs_version_index].hsvc_major;
	n2rng->n_hvapi_minor_version = (uint_t)ncs_minor_ver;

	/*
	 * Warn if we are not running version 2.0 or later of the api on
	 * a multiple-rng system; attach continues regardless.
	 */
	if ((n2rng->n_binding != N2RNG_CPU_N2) &&
	    (n2rng->n_hvapi_major_version < 2)) {
		cmn_err(CE_NOTE, "n2rng: Incompatible hypervisor api "
		    "version %d.%d detected", n2rng->n_hvapi_major_version,
		    n2rng->n_hvapi_minor_version);
	}

	/* Initialize ctl structure if running in the control domain */
	if (n2rng_init_ctl(n2rng) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "n2rng: unable to initialize rng "
		    "control structures");
		goto errorexit;
	}

	/* Allocate single thread task queue for rng diags and registration */
	n2rng->n_taskq = ddi_taskq_create(dip, "n2rng_taskq", 1,
	    TASKQ_DEFAULTPRI, 0);

	if (n2rng->n_taskq == NULL) {
		n2rng_diperror(dip, "ddi_taskq_create() failed");
		goto errorexit;
	}

	/* Dispatch task to configure the RNG and register with KCF */
	if (ddi_taskq_dispatch(n2rng->n_taskq, n2rng_config_task,
	    (void *)n2rng, DDI_SLEEP) != DDI_SUCCESS) {
		n2rng_diperror(dip, "ddi_taskq_dispatch() failed");
		goto errorexit;
	}

	return (DDI_SUCCESS);

errorexit:
	/* Wait for pending config tasks to complete and delete the taskq */
	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	n2rng_uninit_ctl(n2rng);

	(void) n2rng_uninit(n2rng);

	if (ncs_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&ncs_hsvc[ncs_version_index]);
		ncs_hsvc_available = B_FALSE;
	}

	mutex_destroy(&n2rng->n_lock);
	ddi_soft_state_free(n2rng_softstate, instance);

	return (DDI_FAILURE);
}

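/*
 * n2rng_detach()
 *
 * Tear down in roughly the reverse order of attach: quiesce the config
 * taskq and any pending retry timeout, free the control structures,
 * unregister from KCF and the hypervisor service, and free the soft
 * state.
 */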
static int
n2rng_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	int		rv;
	n2rng_t		*n2rng;
	timeout_id_t	tid;

	instance = ddi_get_instance(dip);
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	if (n2rng == NULL) {
		n2rng_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_SUSPEND:
		return (n2rng_suspend(n2rng));
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/* Destroy task queue first to ensure configuration has completed */
	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	/* Untimeout pending config retry operations */
	mutex_enter(&n2rng->n_lock);
	tid = n2rng->n_timeout_id;
	n2rng->n_timeout_id = 0;
	mutex_exit(&n2rng->n_lock);
	if (tid) {
		DBG1(n2rng, DCHATTY, "n2rng_detach: untimeout pending retry "
		    "id = %x", tid);
		(void) untimeout(tid);
	}

	n2rng_uninit_ctl(n2rng);

	/* unregister with KCF---also tears down FIPS state */
	rv = n2rng_uninit(n2rng) ? DDI_FAILURE : DDI_SUCCESS;

	if (ncs_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&ncs_hsvc[ncs_version_index]);
		ncs_hsvc_available = B_FALSE;
	}

	mutex_destroy(&n2rng->n_lock);
	ddi_soft_state_free(n2rng_softstate, instance);

	return (rv);
}

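/*
 * n2rng_suspend()
 *
 * Unregister from KCF so that no new entropy requests arrive while the
 * system is suspended; n2rng_resume() reconfigures the hardware and
 * re-registers.
 */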
/*ARGSUSED*/
static int
n2rng_suspend(n2rng_t *n2rng)
{
	/* unregister with KCF---also tears down FIPS state */
	if (n2rng_uninit(n2rng) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "n2rng: unable to unregister from KCF");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
n2rng_resume(n2rng_t *n2rng)
{
	/* Assume clock is same speed and all data structures are intact */

	/* Re-configure the RNG hardware and register with KCF */
	return (n2rng_config(n2rng));
}

/*
 * Map hypervisor error codes to Solaris error codes.  Only H_EOK,
 * H_EWOULDBLOCK, H_EIO, H_EBUSY, and H_ENOACCESS are meaningful to
 * this device; all other error codes (including H_ENORADDR and
 * H_EBADALIGN) are mapped to EINVAL.
 */
int
n2rng_herr2kerr(uint64_t hv_errcode)
{
	int	s_errcode;

	switch (hv_errcode) {
	case H_EWOULDBLOCK:
		s_errcode = EWOULDBLOCK;
		break;
	case H_EIO:
		s_errcode = EIO;
		break;
	case H_EBUSY:
		s_errcode = EBUSY;
		break;
	case H_EOK:
		s_errcode = 0;
		break;
	case H_ENOACCESS:
		s_errcode = EPERM;
		break;
	case H_ENORADDR:
	case H_EBADALIGN:
	default:
		s_errcode = EINVAL;
		break;
	}
	return (s_errcode);
}

/*
 * Waits approximately delay_sticks counts of the stick register.
 * Times shorter than one sys clock tick (10ms on most systems) are
 * done by busy waiting.
 */
void
cyclesleep(n2rng_t *n2rng, uint64_t delay_sticks)
{
	uint64_t	end_stick = gettick() + delay_sticks;
	int64_t		sticks_to_wait;
	clock_t		sys_ticks_to_wait;
	clock_t		usecs_to_wait;

	/*CONSTCOND*/
	while (1) {
		sticks_to_wait = end_stick - gettick();
		if (sticks_to_wait <= 0) {
			return;
		}

		usecs_to_wait = sticks_to_wait / n2rng->n_sticks_per_usec;
		sys_ticks_to_wait = drv_usectohz(usecs_to_wait);

		if (sys_ticks_to_wait > 0) {
			/* sleep */
			delay(sys_ticks_to_wait);
		} else if (usecs_to_wait > 0) {
			/* busy wait */
			drv_usecwait(usecs_to_wait);
		}
	}
}

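/*
 * log_internal_errors()
 *
 * Log hypervisor error codes that indicate an internal driver problem
 * or unexpected hardware state; fname names the hypervisor call that
 * returned the error.
 */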
static void
log_internal_errors(uint64_t hverr, char *fname)
{
	switch (hverr) {
	case H_EBADALIGN:
		cmn_err(CE_WARN, "n2rng: internal alignment problem");
		break;
	case H_ENORADDR:
		cmn_err(CE_WARN, "n2rng: internal invalid address");
		break;
	case H_ENOACCESS:
		cmn_err(CE_WARN, "n2rng: access failure");
		break;
	case H_EWOULDBLOCK:
		cmn_err(CE_WARN, "n2rng: hardware busy");
		break;
	default:
		cmn_err(CE_NOTE,
		    "n2rng: %s unexpectedly returned hverr %ld",
		    fname, hverr);
		break;
	}
}

/*
 * Collects a buffer full of bits, using the specified setup. numbytes
 * must be a multiple of 8. If a sub-operation fails with EIO (handle
 * mismatch), returns EIO.  If collect_setupp is NULL, the current
 * setup is used.  If exit_setupp is NULL, the control configuration
 * and state are not set at exit.  WARNING: the buffer must be 8-byte
 * aligned and in contiguous physical addresses.  Contiguousness is
 * not checked!
 */
int
n2rng_collect_diag_bits(n2rng_t *n2rng, int rngid,
    n2rng_setup_t *collect_setupp, void *buffer, int numbytes,
    n2rng_setup_t *exit_setupp, uint64_t exitstate)
{
	int		rv;
	int		override_rv = 0;
	uint64_t	hverr;
	int		i;
	uint64_t	tdelta;
	n2rng_setup_t	setupbuffer[2];
	n2rng_setup_t	*setupcontigp;
	uint64_t	setupphys;
	int		numchunks;
	boolean_t	rnglooping;
	int		busycount = 0;
	int		blockcount = 0;

	if (numbytes % sizeof (uint64_t)) {
		return (EINVAL);
	}

	if ((uint64_t)buffer % sizeof (uint64_t) != 0) {
		return (EINVAL);
	}

	numchunks = ((numbytes / sizeof (uint64_t)) + RNG_DIAG_CHUNK_SIZE - 1)
	    / RNG_DIAG_CHUNK_SIZE;
	/*
	 * Use setupbuffer[0] if it is contiguous, otherwise
	 * setupbuffer[1].
	 */
	setupcontigp = &setupbuffer[
	    CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1];
	setupphys = va_to_pa(setupcontigp);

	/*
	 * If a non-null collect_setupp pointer has been provided,
	 * push the specified setup into the hardware.
	 */
	if (collect_setupp != NULL) {
		/* copy the specified state to the aligned buffer */
		*setupcontigp = *collect_setupp;
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = n2rng_ctl_write(n2rng, rngid, setupphys,
			    CTL_STATE_HEALTHCHECK,
			    n2rng->n_ctl_data->n_watchdog_cycles, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
				rnglooping = B_FALSE;
				break;
			case H_EIO: /* control yanked from us */
			case H_ENOACCESS: /* We are not control domain */
				return (rv);
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(1): "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					return (rv);
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			case H_EBUSY:
				/*
				 * A control write is already in progress.
				 * Note: This shouldn't happen since
				 * n2rng_ctl_write() waits for the
				 * write to complete.
				 */
				if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(1): "
					    "exceeded busy count of %d",
					    RNG_MAX_BUSY_ATTEMPTS);
					return (rv);
				} else {
					delay(RNG_RETRY_BUSY_DELAY);
				}
				break;
			default:
				log_internal_errors(hverr, "hv_rng_ctl_write");
				override_rv = rv;
				goto restore_state;
			}
		} /* while (rnglooping) */
	} /* if (collect_setupp != NULL) */

	/* If the caller asks for some bytes, collect the data */
	if (numbytes > 0) {
		for (i = 0; i < numchunks; i++) {
			size_t thisnumbytes = (i == numchunks - 1) ?
			    numbytes - i * (RNG_DIAG_CHUNK_SIZE *
			    sizeof (uint64_t)) :
			    RNG_DIAG_CHUNK_SIZE * sizeof (uint64_t);

			/* try until we successfully read a word of data */
			rnglooping = B_TRUE;
			busycount = 0;
			blockcount = 0;
			while (rnglooping) {
				hverr = n2rng_data_read_diag(n2rng, rngid,
				    va_to_pa((uint64_t *)buffer +
				    RNG_DIAG_CHUNK_SIZE * i),
				    thisnumbytes, &tdelta);
				rv = n2rng_herr2kerr(hverr);
				switch (hverr) {
				case H_EOK:
					rnglooping = B_FALSE;
					break;
				case H_EIO:
				case H_ENOACCESS:
					return (rv);
				case H_EWOULDBLOCK:
					/* Data not available, try again */
					if (++blockcount >
					    RNG_MAX_BLOCK_ATTEMPTS) {
						DBG1(n2rng, DHEALTH,
						    "n2rng_collect_diag_bits"
						    "(2): exceeded block count"
						    " of %d",
						    RNG_MAX_BLOCK_ATTEMPTS);
						return (rv);
					} else {
						cyclesleep(n2rng, tdelta);
					}
					break;
				default:
					log_internal_errors(hverr,
					    "hv_rng_data_read_diag");
					override_rv = rv;
					goto restore_state;
				}
			} /* while (rnglooping) */
		} /* for */
	}

restore_state:

	/* restore the preferred configuration and set exit state */
	if (exit_setupp != NULL) {

		*setupcontigp = *exit_setupp;
		rnglooping = B_TRUE;
		busycount = 0;
		blockcount = 0;
		while (rnglooping) {
			hverr = n2rng_ctl_write(n2rng, rngid, setupphys,
			    exitstate, n2rng->n_ctl_data->n_watchdog_cycles,
			    &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
			case H_EIO: /* control yanked from us */
			case H_EINVAL: /* some external error, probably */
			case H_ENOACCESS: /* We are not control domain */
				rnglooping = B_FALSE;
				break;
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(3): "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					return (rv);
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			case H_EBUSY:
				/*
				 * A control write is already in progress.
				 * Note: This shouldn't happen since
				 * n2rng_ctl_write() waits for the
				 * write to complete.
				 */
				if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(3): "
					    "exceeded busy count of %d",
					    RNG_MAX_BUSY_ATTEMPTS);
					return (rv);
				} else {
					delay(RNG_RETRY_BUSY_DELAY);
				}
				break;
			default:
				rnglooping = B_FALSE;
				log_internal_errors(hverr, "hv_rng_ctl_write");
				break;
			}
		} /* while */
	} /* if */

	/*
	 * override_rv takes care of the case where we abort because
	 * of some error, but still want to restore the preferred state
	 * and return the first error, even if other errors occur.
	 */
	return (override_rv ? override_rv : rv);
}

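/*
 * n2rng_getentropy()
 *
 * Fill the caller's buffer with random data one 64-bit word at a
 * time, retrying transient errors.  size is expected to be a multiple
 * of 8 bytes; any remainder is left unfilled.
 */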
int
n2rng_getentropy(n2rng_t *n2rng, void *buffer, size_t size)
{
	int		i, rv = 0;  /* so it works if size is zero */
	uint64_t	hverr;
	uint64_t	*buffer_w = (uint64_t *)buffer;
	int		num_w = size / sizeof (uint64_t);
	uint64_t	randval;
	uint64_t	randvalphys = va_to_pa(&randval);
	uint64_t	tdelta;
	int		failcount = 0;
	int		blockcount = 0;
	boolean_t	rnglooping;

	for (i = 0; i < num_w; i++) {
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = hv_rng_data_read(randvalphys, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
				buffer_w[i] = randval;
				failcount = 0;
				rnglooping = B_FALSE;
				break;
			case H_EIO:
				/*
				 * Either a health check is in progress, or
				 * the watchdog timer has expired while running
				 * hv api version 2.0 or higher with health
				 * checks enabled.
				 */
				if (n2rng->n_hvapi_major_version < 2) {
					/*
					 * A health check is in progress.
					 * Wait RNG_RETRY_HLCHK_USECS and fail
					 * after RNG_MAX_DATA_READ_ATTEMPTS
					 * failures.
					 */
					if (++failcount >
					    RNG_MAX_DATA_READ_ATTEMPTS) {
						DBG2(n2rng, DHEALTH,
						    "n2rng_getentropy: exceeded "
						    "EIO count of %d on cpu %d",
						    RNG_MAX_DATA_READ_ATTEMPTS,
						    CPU->cpu_id);
						goto exitpoint;
					} else {
						delay(drv_usectohz
						    (RNG_RETRY_HLCHK_USECS));
					}
				} else {
					/*
					 * Just return the error. If a flurry of
					 * random data requests happen to occur
					 * during a health check, there are
					 * multiple levels of defense:
					 * - 2.0 HV provides random data pool
					 * - FIPS algorithm tolerates failures
					 * - Software failover
					 * - Automatic configuration retries
					 * - Hardware failover on some systems
					 */
					goto exitpoint;
				}
				break;
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_getentropy: "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					goto exitpoint;
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			default:
				log_internal_errors(hverr, "hv_rng_data_read");
				goto exitpoint;
			}
		} /* while */
	} /* for */

exitpoint:
	return (rv);
}

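/*
 * n2rng_ctl_read()
 *
 * Read the control registers of the specified RNG, dispatching to the
 * v2.0 or v1.0 hypervisor call based on the negotiated API version.
 * Versions prior to 2.0 support only a single RNG and report no write
 * status, so wdelta is returned as zero.
 */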
uint64_t
n2rng_ctl_read(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa, uint64_t *state,
    uint64_t *tdelta, uint64_t *wdelta)
{
	uint64_t	rv;
	uint64_t	wstatus;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_ctl_read_v2(ctlregs_pa, (uint64_t)rngid, state,
		    tdelta, wdelta, &wstatus);
		if (rv == 0) {
			rv = wstatus;
		}
	} else {
		rv = hv_rng_ctl_read(ctlregs_pa, state, tdelta);
		*wdelta = 0;
	}

	return (rv);
}

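/*
 * n2rng_ctl_wait()
 *
 * Poll the control state of the specified RNG (v2.0 API only) until a
 * pending control write completes, bounding the number of busy and
 * would-block retries.
 */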
uint64_t
n2rng_ctl_wait(n2rng_t *n2rng, int rngid)
{
	uint64_t	state;
	uint64_t	tdelta;
	uint64_t	wdelta;
	uint64_t	wstatus;
	boolean_t	rnglooping = B_TRUE;
	uint64_t	rv;
	n2rng_setup_t	setupbuffer[2];
	n2rng_setup_t	*setupcontigp;
	uint64_t	setupphys;
	int		busycount = 0;
	int		blockcount = 0;

	/*
	 * Use setupbuffer[0] if it is contiguous, otherwise
	 * setupbuffer[1].
	 */
	setupcontigp = &setupbuffer[
	    CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1];
	setupphys = va_to_pa(setupcontigp);

	while (rnglooping) {
		rv = hv_rng_ctl_read_v2(setupphys, (uint64_t)rngid, &state,
		    &tdelta, &wdelta, &wstatus);
		switch (rv) {
		case H_EOK:
			rv = wstatus;
			rnglooping = B_FALSE;
			break;
		case H_EWOULDBLOCK:
			/* Data currently not available, try again */
			if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: "
				    "exceeded block count of %d",
				    RNG_MAX_BLOCK_ATTEMPTS);
				return (rv);
			} else {
				cyclesleep(n2rng, tdelta);
			}
			break;
		case H_EBUSY:
			/* Control write still pending, try again */
			if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: "
				    "exceeded busy count of %d",
				    RNG_MAX_BUSY_ATTEMPTS);
				return (rv);
			} else {
				delay(RNG_RETRY_BUSY_DELAY);
			}
			break;
		default:
			log_internal_errors(rv, "n2rng_ctl_wait");
			rnglooping = B_FALSE;
		}
	} /* while (rnglooping) */

	return (rv);
}

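/*
 * n2rng_ctl_write()
 *
 * Write the control registers of the specified RNG.  With the v2.0
 * API the write completes asynchronously, so wait for it via
 * n2rng_ctl_wait() and report the default accumulate time in tdelta.
 */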
uint64_t
n2rng_ctl_write(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa,
    uint64_t newstate, uint64_t wtimeout, uint64_t *tdelta)
{
	uint64_t	rv;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_ctl_write_v2(ctlregs_pa, newstate, wtimeout,
		    (uint64_t)rngid);
		if (rv == H_EOK) {
			/* Wait for control registers to be written */
			rv = n2rng_ctl_wait(n2rng, rngid);
		}
		*tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES;
	} else {
		rv = hv_rng_ctl_write(ctlregs_pa, newstate, wtimeout, tdelta);
	}

	return (rv);
}

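/*
 * n2rng_data_read_diag()
 *
 * Read datalen bytes of diagnostic data from the specified RNG into
 * the physically contiguous buffer at data_pa, dispatching on the
 * negotiated API version.
 */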
uint64_t
n2rng_data_read_diag(n2rng_t *n2rng, int rngid, uint64_t data_pa,
    size_t datalen, uint64_t *tdelta)
{
	uint64_t	rv;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_data_read_diag_v2(data_pa, datalen,
		    (uint64_t)rngid, tdelta);
		if (*tdelta == 0) {
			*tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES;
		}
	} else {
		rv = hv_rng_data_read_diag(data_pa, datalen, tdelta);
	}

	return (rv);
}

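/*
 * n2rng_check_ctl_access()
 *
 * Determine whether we may control the RNG hardware, i.e. whether the
 * driver is running in the control domain; callers treat H_ENOACCESS
 * (EPERM) as "guest domain".
 */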
uint64_t
n2rng_check_ctl_access(n2rng_t *n2rng)
{
	uint64_t	rv;
	uint64_t	unused_64;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		/*
		 * Attempt to read control registers with invalid ID and data
		 * just to see if we get an access error
		 */
		rv = hv_rng_ctl_read_v2(NULL, N2RNG_INVALID_ID,
		    &unused_64, &unused_64, &unused_64, &unused_64);
	} else {
		rv = hv_rng_get_diag_control();
	}

	return (rv);
}

/*
 * n2rng_config_retry()
 *
 * Schedule a timed call to n2rng_config() if one is not already pending
 */
void
n2rng_config_retry(n2rng_t *n2rng, clock_t seconds)
{
	mutex_enter(&n2rng->n_lock);
	/* Check if a config retry is already pending */
	if (n2rng->n_timeout_id) {
		DBG1(n2rng, DCFG, "n2rng_config_retry: retry pending "
		    "id = %x", n2rng->n_timeout_id);
	} else {
		n2rng->n_timeout_id = timeout(n2rng_config_task,
		    (void *)n2rng, drv_usectohz(seconds * SECOND));
		DBG2(n2rng, DCFG, "n2rng_config_retry: retry scheduled in "
		    "%d seconds, id = %x", seconds, n2rng->n_timeout_id);
	}
	mutex_exit(&n2rng->n_lock);
}

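/*
 * sticks_per_usec()
 *
 * Estimate the rate of the stick counter in ticks per microsecond by
 * comparing it against gethrtime() across a short delay.
 */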
static uint64_t
sticks_per_usec(void)
{
	uint64_t starttick = gettick();
	hrtime_t starttime = gethrtime();
	uint64_t endtick;
	hrtime_t endtime;

	delay(2);

	endtick = gettick();
	endtime = gethrtime();

	return ((1000 * (endtick - starttick)) / (endtime - starttime));
}

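/*
 * n2rng_init_ctl()
 *
 * If running in the control domain, allocate and initialize the RNG
 * control structures from driver properties and seed each RNG's
 * preferred configuration; in a guest domain this is a no-op.
 */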
static int
n2rng_init_ctl(n2rng_t *n2rng)
{
	int		rv;
	uint64_t	hverr;
	rng_entry_t	*rng;
	int		rngid;
	int		blockcount = 0;

	n2rng->n_ctl_data = NULL;

	/* Attempt to gain diagnostic control */
	do {
		hverr = n2rng_check_ctl_access(n2rng);
		rv = n2rng_herr2kerr(hverr);
		if (hverr == H_EWOULDBLOCK) {
			if (++blockcount > RNG_MAX_BUSY_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_init_ctl: "
				    "exceeded busy count of %d",
				    RNG_MAX_BUSY_ATTEMPTS);
				return (rv);
			}
			delay(RNG_RETRY_BUSY_DELAY);
		}
	} while (hverr == H_EWOULDBLOCK);

	/*
	 * If the attempt fails with EPERM, the driver is not running in the
	 * control domain
	 */
	if (rv == EPERM) {
		DBG0(n2rng, DATTACH,
		    "n2rng_init_ctl: Running in guest domain");
		return (DDI_SUCCESS);
	}

	/* Allocate control structure only used in control domain */
	n2rng->n_ctl_data = kmem_alloc(sizeof (rng_ctl_data_t), KM_SLEEP);
	n2rng->n_ctl_data->n_num_rngs_online = 0;

	/*
	 * If running with an API version less than 2.0 default to one rng.
	 * Otherwise get the number of rngs from the device properties.
	 */
	if (n2rng->n_hvapi_major_version < 2) {
		n2rng->n_ctl_data->n_num_rngs = 1;
	} else {
		n2rng->n_ctl_data->n_num_rngs =
		    ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
		    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
		    N2RNG_PROP_NUM_UNITS, 0);
		if (n2rng->n_ctl_data->n_num_rngs == 0) {
			cmn_err(CE_WARN, "n2rng: %s property not found",
			    N2RNG_PROP_NUM_UNITS);
			return (DDI_FAILURE);
		}
	}

	/* Allocate space for all rng entries */
	n2rng->n_ctl_data->n_rngs =
	    kmem_zalloc(n2rng->n_ctl_data->n_num_rngs *
	    sizeof (rng_entry_t), KM_SLEEP);

	/* Get accumulate cycles from .conf file. */
	n2rng->n_ctl_data->n_accumulate_cycles =
	    ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "acc_cycles",
	    RNG_DEFAULT_ACCUMULATE_CYCLES);

	/* Get health check frequency from .conf file */
	n2rng->n_ctl_data->n_hc_secs = ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "hc_seconds",
	    RNG_DEFAULT_HC_SECS);

	/* API versions prior to 2.0 do not support health checks */
	if ((n2rng->n_hvapi_major_version < 2) &&
	    (n2rng->n_ctl_data->n_hc_secs > 0)) {
		cmn_err(CE_WARN, "n2rng: Hypervisor api "
		    "version %d.%d does not support health checks",
		    n2rng->n_hvapi_major_version,
		    n2rng->n_hvapi_minor_version);
		n2rng->n_ctl_data->n_hc_secs = 0;
	}

	/* Calculate watchdog timeout value */
	if (n2rng->n_ctl_data->n_hc_secs <= 0) {
		n2rng->n_ctl_data->n_watchdog_cycles = 0;
	} else {
		n2rng->n_ctl_data->n_watchdog_cycles =
		    ((uint64_t)(RNG_EXTRA_WATCHDOG_SECS) +
		    n2rng->n_ctl_data->n_hc_secs) *
		    n2rng->n_sticks_per_usec * 1000000;
	}

	/*
	 * Set some plausible state into the preferred configuration.
	 * The intent is that the health check will immediately overwrite it.
	 */
	for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs; rngid++) {

		rng = &n2rng->n_ctl_data->n_rngs[rngid];

		rng->n_preferred_config.ctlwds[0].word = 0;
		rng->n_preferred_config.ctlwds[0].fields.rnc_anlg_sel =
		    N2RNG_NOANALOGOUT;
		rng->n_preferred_config.ctlwds[0].fields.rnc_cnt =
		    RNG_DEFAULT_ACCUMULATE_CYCLES;
		rng->n_preferred_config.ctlwds[0].fields.rnc_mode =
		    RNG_MODE_NORMAL;
		rng->n_preferred_config.ctlwds[1].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[2].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[3].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[0].fields.rnc_vcoctl = 1;
		rng->n_preferred_config.ctlwds[0].fields.rnc_selbits = 1;
		rng->n_preferred_config.ctlwds[1].fields.rnc_vcoctl = 2;
		rng->n_preferred_config.ctlwds[1].fields.rnc_selbits = 2;
		rng->n_preferred_config.ctlwds[2].fields.rnc_vcoctl = 3;
		rng->n_preferred_config.ctlwds[2].fields.rnc_selbits = 4;
		rng->n_preferred_config.ctlwds[3].fields.rnc_vcoctl = 0;
		rng->n_preferred_config.ctlwds[3].fields.rnc_selbits = 7;
	}

	n2rng_setcontrol(n2rng);
	DBG2(n2rng, DATTACH,
	    "n2rng_init_ctl: Running in control domain with %d rng device%s",
	    n2rng->n_ctl_data->n_num_rngs,
	    (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s");
	DBG2(n2rng, DCFG,
	    "n2rng_init_ctl: n_sticks_per_usec = %ld, n_hc_secs = %d",
	    n2rng->n_sticks_per_usec,
	    n2rng->n_ctl_data->n_hc_secs);
	DBG2(n2rng, DCFG,
	    "n2rng_init_ctl: n_watchdog_cycles = %ld, "
	    "n_accumulate_cycles = %ld", n2rng->n_ctl_data->n_watchdog_cycles,
	    n2rng->n_ctl_data->n_accumulate_cycles);

	return (DDI_SUCCESS);
}

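/*
 * n2rng_uninit_ctl()
 *
 * Free the RNG control structures allocated by n2rng_init_ctl(), if
 * any.
 */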
static void
n2rng_uninit_ctl(n2rng_t *n2rng)
{
	if (n2rng->n_ctl_data) {
		if (n2rng->n_ctl_data->n_num_rngs) {
			kmem_free(n2rng->n_ctl_data->n_rngs,
			    n2rng->n_ctl_data->n_num_rngs *
			    sizeof (rng_entry_t));
			n2rng->n_ctl_data->n_rngs = NULL;
			n2rng->n_ctl_data->n_num_rngs = 0;
		}
		kmem_free(n2rng->n_ctl_data, sizeof (rng_ctl_data_t));
		n2rng->n_ctl_data = NULL;
	}
}


/*
 * n2rng_config_test()
 *
 * Attempt to read random data to see if the rng is configured.
 */
int
n2rng_config_test(n2rng_t *n2rng)
{
	int		rv = 0;
	uint64_t	hverr;
	uint64_t	randval = 0;
	uint64_t	randvalphys = va_to_pa(&randval);
	uint64_t	tdelta;
	int		failcount = 0;
	int		blockcount = 0;
	boolean_t	rnglooping = B_TRUE;

	while (rnglooping) {
		hverr = hv_rng_data_read(randvalphys, &tdelta);
		rv = n2rng_herr2kerr(hverr);
		switch (hverr) {
		case H_EOK:
			failcount = 0;
			rnglooping = B_FALSE;
			break;
		case H_EIO:
			/*
			 * A health check is in progress.
			 * Wait RNG_RETRY_HLCHK_USECS and fail
			 * after RNG_MAX_DATA_READ_ATTEMPTS
			 * failures.
			 */
			if (++failcount > RNG_MAX_DATA_READ_ATTEMPTS) {
				goto exitpoint;
			} else {
				delay(drv_usectohz(RNG_RETRY_HLCHK_USECS));
			}
			break;
		case H_EWOULDBLOCK:
			/* Data currently not available, try again */
			if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_config_test: "
				    "exceeded block count of %d",
				    RNG_MAX_BLOCK_ATTEMPTS);
				goto exitpoint;
			} else {
				cyclesleep(n2rng, tdelta);
			}
			break;
		case H_ENOACCESS:
			/* An rng error has occurred during health check */
			goto exitpoint;
		default:
			log_internal_errors(hverr, "hv_rng_data_read");
			goto exitpoint;
		}
	} /* while */

exitpoint:
	return (rv);
}

/*
 * n2rng_config()
 *
 * Run health check on the RNG hardware
 * Configure the RNG hardware
 * Register with crypto framework
 */
static int
n2rng_config(n2rng_t *n2rng)
{
	int		rv;
	rng_entry_t	*rng;
	int		rngid;

	/*
	 * Run health checks and configure rngs if running in control domain,
	 * otherwise just check if at least one rng is available.
	 */
	if (n2rng_iscontrol(n2rng)) {

		for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs;
		    rngid++) {

			rng = &n2rng->n_ctl_data->n_rngs[rngid];

			/* Only test rngs that have not already failed */
			if (rng->n_rng_state == CTL_STATE_ERROR) {
				continue;
			}

			if ((n2rng->n_binding == N2RNG_CPU_VF) &&
			    (n2rng->n_hvapi_major_version < 2)) {
				/*
				 * Since api versions prior to 2.0 do not
				 * support multiple rngs, bind to the current
				 * processor for the entire health check
				 * process.
				 */
				thread_affinity_set(curthread, CPU_CURRENT);
				DBG1(n2rng, DCFG, "n2rng_config: "
				    "Configuring single rng from cpu %d",
				    CPU->cpu_id);
				rv = n2rng_do_health_check(n2rng, rngid);
				thread_affinity_clear(curthread);
			} else {
				rv = n2rng_do_health_check(n2rng, rngid);
			}

			switch (rv) {
			case 0:
				/*
				 * Successful, increment online count if
				 * necessary
				 */
				DBG1(n2rng, DCFG, "n2rng_config: rng(%d) "
				    "passed health checks", rngid);
				if (rng->n_rng_state != CTL_STATE_CONFIGURED) {
					rng->n_rng_state =
					    CTL_STATE_CONFIGURED;
					n2rng->n_ctl_data->n_num_rngs_online++;
				}
				break;
			default:
				/*
				 * Health checks failed, decrement online
				 * count if necessary
				 */
				cmn_err(CE_WARN, "n2rng: rng(%d) "
				    "failed health checks", rngid);
				if (rng->n_rng_state == CTL_STATE_CONFIGURED) {
					n2rng->n_ctl_data->n_num_rngs_online--;
				}
				rng->n_rng_state = CTL_STATE_ERROR;
				break;
			}
		}
		DBG2(n2rng, DCFG, "n2rng_config: %d rng%s online",
		    n2rng->n_ctl_data->n_num_rngs_online,
		    (n2rng->n_ctl_data->n_num_rngs_online == 1) ? "" : "s");

		/* Check if all rngs have failed */
		if (n2rng->n_ctl_data->n_num_rngs_online == 0) {
			cmn_err(CE_WARN, "n2rng: %d RNG device%s failed",
			    n2rng->n_ctl_data->n_num_rngs,
			    (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s");
			goto errorexit;
		} else {
			n2rng_setconfigured(n2rng);
		}
	} else {
		/* Running in guest domain, just check if rng is configured */
		rv = n2rng_config_test(n2rng);
		switch (rv) {
		case 0:
			n2rng_setconfigured(n2rng);
			break;
		case EIO:
			/* Don't set configured to force a retry */
			break;
		default:
			goto errorexit;
		}
	}

	/*
	 * Initialize FIPS state and register with KCF if we have at least one
	 * RNG configured.  Otherwise schedule a retry if all rngs have not
	 * failed.
	 */
	if (n2rng_isconfigured(n2rng)) {

		if (n2rng_init(n2rng) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "n2rng: unable to register with KCF");
			goto errorexit;
		}

		/*
		 * Schedule a retry if running in the control domain and a
		 * health check time has been specified.
		 */
		if (n2rng_iscontrol(n2rng) &&
		    (n2rng->n_ctl_data->n_hc_secs > 0)) {
			n2rng_config_retry(n2rng,
			    n2rng->n_ctl_data->n_hc_secs);
		}
	} else if (!n2rng_isfailed(n2rng)) {
		/* Schedule a retry if one is not already pending */
		n2rng_config_retry(n2rng, RNG_CFG_RETRY_SECS);
	}
	return (DDI_SUCCESS);

errorexit:
	/* Unregister from KCF if we are registered */
	(void) n2rng_unregister_provider(n2rng);
	n2rng_setfailed(n2rng);
	cmn_err(CE_WARN, "n2rng: hardware failure detected");
	return (DDI_FAILURE);
}

/*
 * n2rng_config_task()
 *
 * Call n2rng_config() from the task queue or after a timeout, ignoring
 * the result.
 */
static void
n2rng_config_task(void *targ)
{
	n2rng_t *n2rng = (n2rng_t *)targ;

	mutex_enter(&n2rng->n_lock);
	n2rng->n_timeout_id = 0;
	mutex_exit(&n2rng->n_lock);
	(void) n2rng_config(n2rng);
}
1369