/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */


/*
 * Niagara 2 Random Number Generator (RNG) driver
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <sys/ksynch.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/param.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/hsvc.h>
#include <sys/machsystm.h>
#include <sys/hypervisor_api.h>
#include <sys/n2rng.h>
#include <fips/fips_checksum.h>

static int	n2rng_attach(dev_info_t *, ddi_attach_cmd_t);
static int	n2rng_detach(dev_info_t *, ddi_detach_cmd_t);
static int	n2rng_suspend(n2rng_t *);
static int	n2rng_resume(n2rng_t *);
static uint64_t sticks_per_usec(void);
u_longlong_t	gettick(void);
static int	n2rng_init_ctl(n2rng_t *);
static void	n2rng_uninit_ctl(n2rng_t *);
static int	n2rng_config(n2rng_t *);
static void	n2rng_config_task(void * targ);

/*
 * Device operations.
 */

static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	n2rng_attach,		/* devo_attach */
	n2rng_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
	&mod_driverops,			/* drv_modops */
	"N2 RNG Driver",		/* drv_linkinfo */
	&devops,			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev */
	&modldrv,			/* ml_linkage */
	NULL
};

/*
 * Driver globals: soft state.
 */
static void	*n2rng_softstate = NULL;

/*
 * Hypervisor NCS services information.
 */
static boolean_t ncs_hsvc_available = B_FALSE;

#define	NVERSIONS	2

/*
 * HV API versions supported by this driver.
 */
static hsvc_info_t ncs_hsvc[NVERSIONS] = {
	{ HSVC_REV_1, NULL, HSVC_GROUP_RNG, 2, 0, DRIVER },	/* v2.0 */
	{ HSVC_REV_1, NULL, HSVC_GROUP_RNG, 1, 0, DRIVER },	/* v1.0 */
};
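/*
 * Ordered newest-first: n2rng_attach() walks this table until
 * hsvc_register() succeeds, so the driver binds to the newest API
 * version the hypervisor offers.
 */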
int	ncs_version_index;	/* index into ncs_hsvc[] */

/*
 * DDI entry points.
 */
int
_init(void)
{
	int	rv;

	rv = ddi_soft_state_init(&n2rng_softstate, sizeof (n2rng_t), 1);
	if (rv != 0) {
		/* this should *never* happen! */
		return (rv);
	}

	if ((rv = mod_install(&modlinkage)) != 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
		return (rv);
	}

	return (0);
}

int
_fini(void)
{
	int	rv;

	rv = mod_remove(&modlinkage);
	if (rv == 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
	}

	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static int
n2rng_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	n2rng_t		*n2rng = NULL;
	int		instance;
	int		rv;
	int		version;
	uint64_t	ncs_minor_ver;

	instance = ddi_get_instance(dip);
	DBG1(NULL, DENTRY, "n2rng_attach called, instance %d", instance);
	/*
	 * Only instance 0 of n2rng driver is allowed.
	 */
	if (instance != 0) {
		n2rng_diperror(dip, "only one instance (0) allowed");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_RESUME:
		n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate,
		    instance);
		if (n2rng == NULL) {
			n2rng_diperror(dip, "no soft state in attach");
			return (DDI_FAILURE);
		}
		return (n2rng_resume(n2rng));

	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	rv = ddi_soft_state_zalloc(n2rng_softstate, instance);
	if (rv != DDI_SUCCESS) {
		n2rng_diperror(dip, "unable to allocate soft state");
		return (DDI_FAILURE);
	}
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	ASSERT(n2rng != NULL);
	n2rng->n_dip = dip;

	mutex_init(&n2rng->n_lock, NULL, MUTEX_DRIVER, NULL);
	n2rng->n_flags = 0;
	n2rng->n_timeout_id = 0;
	n2rng->n_sticks_per_usec = sticks_per_usec();

	/* Determine binding type */
	n2rng->n_binding_name = ddi_binding_name(dip);
	if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_N2,
	    strlen(N2RNG_BINDNAME_N2)) == 0) {
		/*
		 * Niagara 2
		 */
		n2rng->n_binding = N2RNG_CPU_N2;
	} else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_VF,
	    strlen(N2RNG_BINDNAME_VF)) == 0) {
		/*
		 * Victoria Falls
		 */
		n2rng->n_binding = N2RNG_CPU_VF;
	} else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_KT,
	    strlen(N2RNG_BINDNAME_KT)) == 0) {
		/*
		 * Rainbow Falls
		 */
		n2rng->n_binding = N2RNG_CPU_KT;
	} else {
		n2rng_diperror(dip,
		    "unable to determine n2rng (cpu) binding (%s)",
		    n2rng->n_binding_name);
		goto errorexit;
	}
	DBG1(n2rng, DCHATTY, "n2rng_attach: n2rng->n_binding_name = %s",
	    n2rng->n_binding_name);

	/* Negotiate HV api version number */
	for (version = 0; version < NVERSIONS; version++) {
		rv = hsvc_register(&ncs_hsvc[version], &ncs_minor_ver);
		if (rv == 0)
			break;

		DBG4(n2rng, DCHATTY, "n2rng_attach: grp: 0x%lx, maj: %ld, "
		    "min: %ld, errno: %d", ncs_hsvc[version].hsvc_group,
		    ncs_hsvc[version].hsvc_major,
		    ncs_hsvc[version].hsvc_minor, rv);
	}
	if (version == NVERSIONS) {
		for (version = 0; version < NVERSIONS; version++) {
			cmn_err(CE_WARN,
			    "%s: cannot negotiate hypervisor services "
			    "group: 0x%lx major: %ld minor: %ld errno: %d",
			    ncs_hsvc[version].hsvc_modname,
			    ncs_hsvc[version].hsvc_group,
			    ncs_hsvc[version].hsvc_major,
			    ncs_hsvc[version].hsvc_minor, rv);
		}
		goto errorexit;
	}
	ncs_version_index = version;
	ncs_hsvc_available = B_TRUE;
	DBG2(n2rng, DATTACH, "n2rng_attach: ncs api version (%ld.%ld)",
	    ncs_hsvc[ncs_version_index].hsvc_major, ncs_minor_ver);
	n2rng->n_hvapi_major_version = ncs_hsvc[ncs_version_index].hsvc_major;
	n2rng->n_hvapi_minor_version = (uint_t)ncs_minor_ver;

	/*
	 * Verify that we are running version 2.0 or later api on multiple
	 * rng systems.
	 */
	if ((n2rng->n_binding != N2RNG_CPU_N2) &&
	    (n2rng->n_hvapi_major_version < 2)) {
		cmn_err(CE_NOTE, "n2rng: Incompatible hypervisor api "
		    "version %d.%d detected", n2rng->n_hvapi_major_version,
		    n2rng->n_hvapi_minor_version);
	}

	/* Initialize ctl structure if running in the control domain */
	if (n2rng_init_ctl(n2rng) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "n2rng: unable to initialize rng "
		    "control structures");
		goto errorexit;
	}

	/* Allocate single thread task queue for rng diags and registration */
	n2rng->n_taskq = ddi_taskq_create(dip, "n2rng_taskq", 1,
	    TASKQ_DEFAULTPRI, 0);

	if (n2rng->n_taskq == NULL) {
		n2rng_diperror(dip, "ddi_taskq_create() failed");
		goto errorexit;
	}

	/* Dispatch task to configure the RNG and register with KCF */
	if (ddi_taskq_dispatch(n2rng->n_taskq, n2rng_config_task,
	    (void *)n2rng, DDI_SLEEP) != DDI_SUCCESS) {
		n2rng_diperror(dip, "ddi_taskq_dispatch() failed");
		goto errorexit;
	}

304 		/*
305 		 * FIPs Post test: Feed the known seed and make sure it
306 		 * produces the known random number.
307 		 */
308 		if (n2rng_fips_rng_post() != CRYPTO_SUCCESS) {
309 			n2rng_diperror(dip, "n2rng: FIPs POST test failed\n");
310 			goto errorexit;
311 		}
312 	}

	return (DDI_SUCCESS);

errorexit:
	/* Wait for pending config tasks to complete and delete the taskq */
	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	n2rng_uninit_ctl(n2rng);

	(void) n2rng_uninit(n2rng);

	if (ncs_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&ncs_hsvc[ncs_version_index]);
		ncs_hsvc_available = B_FALSE;
	}

	mutex_destroy(&n2rng->n_lock);
	ddi_soft_state_free(n2rng_softstate, instance);

	return (DDI_FAILURE);
}

static int
n2rng_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	int		rv;
	n2rng_t		*n2rng;
	timeout_id_t	tid;

	instance = ddi_get_instance(dip);
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	if (n2rng == NULL) {
		n2rng_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_SUSPEND:
		return (n2rng_suspend(n2rng));
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/* Destroy task queue first to ensure configuration has completed */
	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	/* Untimeout pending config retry operations */
	mutex_enter(&n2rng->n_lock);
	tid = n2rng->n_timeout_id;
	n2rng->n_timeout_id = 0;
	mutex_exit(&n2rng->n_lock);
	if (tid) {
		DBG1(n2rng, DCHATTY, "n2rng_detach: untimeout pending retry "
		    "id = %x", tid);
		(void) untimeout(tid);
	}

	n2rng_uninit_ctl(n2rng);

	/* unregister with KCF---also tears down FIPS state */
	rv = n2rng_uninit(n2rng) ? DDI_FAILURE : DDI_SUCCESS;

	if (ncs_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&ncs_hsvc[ncs_version_index]);
		ncs_hsvc_available = B_FALSE;
	}

	mutex_destroy(&n2rng->n_lock);
	ddi_soft_state_free(n2rng_softstate, instance);

	return (rv);
}

/*ARGSUSED*/
static int
n2rng_suspend(n2rng_t *n2rng)
{
	/* unregister with KCF---also tears down FIPS state */
	if (n2rng_uninit(n2rng) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "n2rng: unable to unregister from KCF");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
n2rng_resume(n2rng_t *n2rng)
{
	/* Assume clock is same speed and all data structures are intact */

	/* Re-configure the RNG hardware and register with KCF */
	return (n2rng_config(n2rng));
}

/*
 * Map hypervisor error codes to Solaris error codes.  H_EOK,
 * H_EWOULDBLOCK, H_EIO, H_EBUSY, and H_ENOACCESS have direct
 * mappings; any other code (including H_ENORADDR and H_EBADALIGN)
 * is mapped to EINVAL.
 */
int
n2rng_herr2kerr(uint64_t hv_errcode)
{
	int	s_errcode;

	switch (hv_errcode) {
	case H_EWOULDBLOCK:
		s_errcode = EWOULDBLOCK;
		break;
	case H_EIO:
		s_errcode = EIO;
		break;
	case H_EBUSY:
		s_errcode = EBUSY;
		break;
	case H_EOK:
		s_errcode = 0;
		break;
	case H_ENOACCESS:
		s_errcode = EPERM;
		break;
	case H_ENORADDR:
	case H_EBADALIGN:
	default:
		s_errcode = EINVAL;
		break;
	}
	return (s_errcode);
}
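
/*
 * Typical calling pattern, as used throughout this file (an
 * illustrative sketch, not a new entry point):
 *
 *	hverr = hv_rng_data_read(randvalphys, &tdelta);
 *	rv = n2rng_herr2kerr(hverr);
 *	switch (hverr) { ... }
 *
 * The switch still branches on the raw hypervisor code; the mapped
 * errno is only what gets returned to callers.
 */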

/*
 * Waits approximately delay_sticks counts of the stick register.
 * Times shorter than one sys clock tick (10ms on most systems) are
 * done by busy waiting.
 */
void
cyclesleep(n2rng_t *n2rng, uint64_t delay_sticks)
{
	uint64_t	end_stick = gettick() + delay_sticks;
	int64_t		sticks_to_wait;
	clock_t		sys_ticks_to_wait;
	clock_t		usecs_to_wait;

	/*CONSTCOND*/
	while (1) {
		sticks_to_wait = end_stick - gettick();
		if (sticks_to_wait <= 0) {
			return;
		}

		usecs_to_wait = sticks_to_wait / n2rng->n_sticks_per_usec;
		sys_ticks_to_wait = drv_usectohz(usecs_to_wait);

		if (sys_ticks_to_wait > 0) {
			/* sleep */
			delay(sys_ticks_to_wait);
		} else if (usecs_to_wait > 0) {
			/* busy wait */
			drv_usecwait(usecs_to_wait);
		}
	}
}
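
/*
 * For example (illustrative numbers only): with n_sticks_per_usec ==
 * 1000, a delay_sticks of 5,000,000,000 is five seconds, which is
 * satisfied by delay(); the loop then re-reads %stick and burns any
 * remainder shorter than a clock tick in drv_usecwait().
 */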

static void
log_internal_errors(uint64_t hverr, char *fname)
{
	switch (hverr) {
	case H_EBADALIGN:
		cmn_err(CE_WARN, "n2rng: internal alignment problem");
		break;
	case H_ENORADDR:
		cmn_err(CE_WARN, "n2rng: internal invalid address");
		break;
	case H_ENOACCESS:
		cmn_err(CE_WARN, "n2rng: access failure");
		break;
	case H_EWOULDBLOCK:
		cmn_err(CE_WARN, "n2rng: hardware busy");
		break;
	default:
		cmn_err(CE_NOTE, "n2rng: %s unexpectedly returned hverr %ld",
		    fname, hverr);
		break;
	}
}

/*
 * Collects a buffer full of bits, using the specified setup. numbytes
 * must be a multiple of 8. If a sub-operation fails with EIO (handle
 * mismatch), returns EIO.  If collect_setupp is NULL, the current
 * setup is used.  If exit_setupp is NULL, the control configuration
 * and state are not set at exit.  WARNING: the buffer must be 8-byte
 * aligned and in contiguous physical addresses.  Contiguousness is
 * not checked!
 */
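/*
 * Illustrative call (not part of the original source): read 32 bytes
 * with whatever setup is already in the hardware and leave the
 * control state alone afterwards:
 *
 *	uint64_t buf[4];
 *	rv = n2rng_collect_diag_bits(n2rng, rngid, NULL, buf,
 *	    sizeof (buf), NULL, 0);
 */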
int
n2rng_collect_diag_bits(n2rng_t *n2rng, int rngid,
    n2rng_setup_t *collect_setupp, void *buffer, int numbytes,
    n2rng_setup_t *exit_setupp, uint64_t exitstate)
{
	int		rv;
	int		override_rv = 0;
	uint64_t	hverr;
	int		i;
	uint64_t	tdelta;
	n2rng_setup_t	setupbuffer[2];
	n2rng_setup_t	*setupcontigp;
	uint64_t	setupphys;
	int		numchunks;
	boolean_t	rnglooping;
	int		busycount = 0;
	int		blockcount = 0;

	if (numbytes % sizeof (uint64_t)) {
		return (EINVAL);
	}

	if ((uint64_t)buffer % sizeof (uint64_t) != 0) {
		return (EINVAL);
	}

	numchunks = ((numbytes / sizeof (uint64_t)) + RNG_DIAG_CHUNK_SIZE - 1)
	    / RNG_DIAG_CHUNK_SIZE;
	/*
	 * Use setupbuffer[0] if it is contiguous, otherwise
	 * setupbuffer[1].  With two adjacent copies on the stack, at
	 * least one of them cannot straddle a page boundary, so the
	 * physical address from va_to_pa() covers the whole structure.
	 */
	setupcontigp = &setupbuffer[
	    CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1];
	setupphys = va_to_pa(setupcontigp);

	/*
	 * If a non-null collect_setupp pointer has been provided,
	 * push the specified setup into the hardware.
	 */
	if (collect_setupp != NULL) {
		/* copy the specified state to the aligned buffer */
		*setupcontigp = *collect_setupp;
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = n2rng_ctl_write(n2rng, rngid, setupphys,
			    CTL_STATE_HEALTHCHECK,
			    n2rng->n_ctl_data->n_watchdog_cycles, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
				rnglooping = B_FALSE;
				break;
			case H_EIO: /* control yanked from us */
			case H_ENOACCESS: /* We are not control domain */
				return (rv);
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(1): "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					return (rv);
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			case H_EBUSY:
				/*
				 * A control write is already in progress.
				 * Note: This shouldn't happen since
				 * n2rng_ctl_write() waits for the
				 * write to complete.
				 */
				if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(1): "
					    "exceeded busy count of %d",
					    RNG_MAX_BUSY_ATTEMPTS);
					return (rv);
				} else {
					delay(RNG_RETRY_BUSY_DELAY);
				}
				break;
			default:
				log_internal_errors(hverr, "hv_rng_ctl_write");
				override_rv = rv;
				goto restore_state;
			}
		} /* while (rnglooping) */
	} /* if (collect_setupp != NULL) */

	/* If the caller asks for some bytes, collect the data */
	if (numbytes > 0) {
		for (i = 0; i < numchunks; i++) {
			size_t thisnumbytes = (i == numchunks - 1) ?
			    numbytes - i * (RNG_DIAG_CHUNK_SIZE *
			    sizeof (uint64_t)) :
			    RNG_DIAG_CHUNK_SIZE * sizeof (uint64_t);

			/* try until we successfully read a word of data */
			rnglooping = B_TRUE;
			busycount = 0;
			blockcount = 0;
			while (rnglooping) {
				hverr = n2rng_data_read_diag(n2rng, rngid,
				    va_to_pa((uint64_t *)buffer +
				    RNG_DIAG_CHUNK_SIZE * i),
				    thisnumbytes, &tdelta);
				rv = n2rng_herr2kerr(hverr);
				switch (hverr) {
				case H_EOK:
					rnglooping = B_FALSE;
					break;
				case H_EIO:
				case H_ENOACCESS:
					return (rv);
				case H_EWOULDBLOCK:
					/* Data not available, try again */
					if (++blockcount >
					    RNG_MAX_BLOCK_ATTEMPTS) {
						DBG1(n2rng, DHEALTH,
						    "n2rng_collect_diag_bits"
						    "(2): exceeded block count"
						    " of %d",
						    RNG_MAX_BLOCK_ATTEMPTS);
						return (rv);
					} else {
						cyclesleep(n2rng, tdelta);
					}
					break;
				default:
					log_internal_errors(hverr,
					    "hv_rng_data_read_diag");
					override_rv = rv;
					goto restore_state;
				}
			} /* while (rnglooping) */
		} /* for */
	}

restore_state:

	/* restore the preferred configuration and set exit state */
	if (exit_setupp != NULL) {

		*setupcontigp = *exit_setupp;
		rnglooping = B_TRUE;
		busycount = 0;
		blockcount = 0;
		while (rnglooping) {
			hverr = n2rng_ctl_write(n2rng, rngid, setupphys,
			    exitstate, n2rng->n_ctl_data->n_watchdog_cycles,
			    &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
			case H_EIO: /* control yanked from us */
			case H_EINVAL: /* some external error, probably */
			case H_ENOACCESS: /* We are not control domain */
				rnglooping = B_FALSE;
				break;
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(3): "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					return (rv);
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			case H_EBUSY:
				/*
				 * A control write is already in progress.
				 * Note: This shouldn't happen since
				 * n2rng_ctl_write() waits for the
				 * write to complete.
				 */
				if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(3): "
					    "exceeded busy count of %d",
					    RNG_MAX_BUSY_ATTEMPTS);
					return (rv);
				} else {
					delay(RNG_RETRY_BUSY_DELAY);
				}
				break;
			default:
				rnglooping = B_FALSE;
				log_internal_errors(hverr, "hv_rng_ctl_write");
				break;
			}
		} /* while */
	} /* if */

	/*
	 * override_rv takes care of the case where we abort because
	 * of some error, but still want to restore the preferred state
	 * and return the first error, even if other errors occur.
	 */
	return (override_rv ? override_rv : rv);
}

int
n2rng_getentropy(n2rng_t *n2rng, void *buffer, size_t size)
{
	int		i, rv = 0;  /* so it works if size is zero */
	uint64_t	hverr;
	uint64_t	*buffer_w = (uint64_t *)buffer;
	int		num_w = size / sizeof (uint64_t);
	uint64_t	randval;
	uint64_t	randvalphys = va_to_pa(&randval);
	uint64_t	tdelta;
	int		failcount = 0;
	int		blockcount = 0;
	boolean_t	rnglooping;

	for (i = 0; i < num_w; i++) {
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = hv_rng_data_read(randvalphys, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
				buffer_w[i] = randval;
				failcount = 0;
				rnglooping = B_FALSE;
				break;
			case H_EIO:
				/*
				 * Either a health check is in progress, or
				 * the watchdog timer has expired while running
				 * hv api version 2.0 or higher with health
				 * checks enabled.
				 */
				if (n2rng->n_hvapi_major_version < 2) {
					/*
					 * A health check is in progress.
					 * Wait RNG_RETRY_HLCHK_USECS and fail
					 * after RNG_MAX_DATA_READ_ATTEMPTS
					 * failures.
					 */
					if (++failcount >
					    RNG_MAX_DATA_READ_ATTEMPTS) {
						DBG2(n2rng, DHEALTH,
						    "n2rng_getentropy: "
						    "exceeded EIO count of "
						    "%d on cpu %d",
						    RNG_MAX_DATA_READ_ATTEMPTS,
						    CPU->cpu_id);
						goto exitpoint;
					} else {
						delay(drv_usectohz
						    (RNG_RETRY_HLCHK_USECS));
					}
				} else {
					/*
					 * Just return the error. If a flurry of
					 * random data requests happen to occur
					 * during a health check, there are
					 * multiple levels of defense:
					 * - 2.0 HV provides random data pool
					 * - FIPS algorithm tolerates failures
					 * - Software failover
					 * - Automatic configuration retries
					 * - Hardware failover on some systems
					 */
					goto exitpoint;
				}
				break;
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_getentropy: "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					goto exitpoint;
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			default:
				log_internal_errors(hverr, "hv_rng_data_read");
				goto exitpoint;
			}
		} /* while */
	} /* for */

exitpoint:
	return (rv);
}
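
/*
 * Illustrative call (not part of the original source): fill a small
 * word-aligned buffer for a consumer such as the KCF provider code:
 *
 *	uint64_t ebuf[8];
 *	rv = n2rng_getentropy(n2rng, ebuf, sizeof (ebuf));
 *
 * A size that is not a multiple of 8 leaves the trailing bytes
 * unfilled, since the loop works a uint64_t at a time.
 */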

uint64_t
n2rng_ctl_read(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa, uint64_t *state,
    uint64_t *tdelta, uint64_t *wdelta)
{
	uint64_t	rv;
	uint64_t	wstatus;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_ctl_read_v2(ctlregs_pa, (uint64_t)rngid, state,
		    tdelta, wdelta, &wstatus);
		if (rv == 0) {
			rv = wstatus;
		}
	} else {
		rv = hv_rng_ctl_read(ctlregs_pa, state, tdelta);
		*wdelta = 0;
	}

	return (rv);
}

uint64_t
n2rng_ctl_wait(n2rng_t *n2rng, int rngid)
{
	uint64_t	state;
	uint64_t	tdelta;
	uint64_t	wdelta;
	uint64_t	wstatus;
	boolean_t	rnglooping = B_TRUE;
	uint64_t	rv;
	n2rng_setup_t	setupbuffer[2];
	n2rng_setup_t	*setupcontigp;
	uint64_t	setupphys;
	int		busycount = 0;
	int		blockcount = 0;

	/*
	 * Use setupbuffer[0] if it is contiguous, otherwise
	 * setupbuffer[1].
	 */
	setupcontigp = &setupbuffer[
	    CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1];
	setupphys = va_to_pa(setupcontigp);

	while (rnglooping) {
		rv = hv_rng_ctl_read_v2(setupphys, (uint64_t)rngid, &state,
		    &tdelta, &wdelta, &wstatus);
		switch (rv) {
		case H_EOK:
			rv = wstatus;
			rnglooping = B_FALSE;
			break;
		case H_EWOULDBLOCK:
			/* Data currently not available, try again */
			if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: "
				    "exceeded block count of %d",
				    RNG_MAX_BLOCK_ATTEMPTS);
				return (rv);
			} else {
				cyclesleep(n2rng, tdelta);
			}
			break;
		case H_EBUSY:
			/* Control write still pending, try again */
			if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: "
				    "exceeded busy count of %d",
				    RNG_MAX_BUSY_ATTEMPTS);
				return (rv);
			} else {
				delay(RNG_RETRY_BUSY_DELAY);
			}
			break;
		default:
			log_internal_errors(rv, "n2rng_ctl_wait");
			rnglooping = B_FALSE;
		}
	} /* while (rnglooping) */

	return (rv);
}

uint64_t
n2rng_ctl_write(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa,
    uint64_t newstate, uint64_t wtimeout, uint64_t *tdelta)
{
	uint64_t	rv;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_ctl_write_v2(ctlregs_pa, newstate, wtimeout,
		    (uint64_t)rngid);
		if (rv == H_EOK) {
			/* Wait for control registers to be written */
			rv = n2rng_ctl_wait(n2rng, rngid);
		}
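		/*
		 * The v2 control-write interface returns no delay hint,
		 * so report the default accumulate time to the caller.
		 */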
		*tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES;
	} else {
		rv = hv_rng_ctl_write(ctlregs_pa, newstate, wtimeout, tdelta);
	}

	return (rv);
}

uint64_t
n2rng_data_read_diag(n2rng_t *n2rng, int rngid, uint64_t data_pa,
    size_t  datalen, uint64_t *tdelta)
{
	uint64_t	rv;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_data_read_diag_v2(data_pa, datalen,
		    (uint64_t)rngid, tdelta);
		if (*tdelta == 0) {
			*tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES;
		}
	} else {
		rv = hv_rng_data_read_diag(data_pa, datalen, tdelta);
	}

	return (rv);
}

uint64_t
n2rng_check_ctl_access(n2rng_t *n2rng)
{
	uint64_t	rv;
	uint64_t	unused_64;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		/*
		 * Attempt to read control registers with invalid ID and data
		 * just to see if we get an access error
		 */
		rv = hv_rng_ctl_read_v2(NULL, N2RNG_INVALID_ID,
		    &unused_64, &unused_64, &unused_64, &unused_64);
	} else {
		rv = hv_rng_get_diag_control();
	}

	return (rv);
}

/*
 * n2rng_config_retry()
 *
 * Schedule a timed call to n2rng_config() if one is not already pending
 */
void
n2rng_config_retry(n2rng_t *n2rng, clock_t seconds)
{
	mutex_enter(&n2rng->n_lock);
	/* Check if a config retry is already pending */
	if (n2rng->n_timeout_id) {
		DBG1(n2rng, DCFG, "n2rng_config_retry: retry pending "
		    "id = %x", n2rng->n_timeout_id);
	} else {
		n2rng->n_timeout_id = timeout(n2rng_config_task,
		    (void *)n2rng, drv_usectohz(seconds * SECOND));
		DBG2(n2rng, DCFG, "n2rng_config_retry: retry scheduled in "
		    "%d seconds, id = %x", seconds, n2rng->n_timeout_id);
	}
	mutex_exit(&n2rng->n_lock);
}

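/*
 * Calibrate the %stick rate: gettick() counts stick cycles and
 * gethrtime() returns nanoseconds, so after a short delay() the
 * ratio (cycles * 1000) / nanoseconds is cycles per microsecond.
 */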
static uint64_t
sticks_per_usec(void)
{
	uint64_t starttick = gettick();
	hrtime_t starttime = gethrtime();
	uint64_t endtick;
	hrtime_t endtime;

	delay(2);

	endtick = gettick();
	endtime = gethrtime();

	return ((1000 * (endtick - starttick)) / (endtime - starttime));
}

static int
n2rng_init_ctl(n2rng_t *n2rng)
{
	int		rv;
	int		hverr;
	rng_entry_t	*rng;
	int		rngid;
	int		blockcount = 0;

	n2rng->n_ctl_data = NULL;

	/* Attempt to gain diagnostic control */
	do {
		hverr = n2rng_check_ctl_access(n2rng);
		rv = n2rng_herr2kerr(hverr);
		if (hverr == H_EWOULDBLOCK) {
			if (++blockcount > RNG_MAX_BUSY_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_init_ctl: "
				    "exceeded busy count of %d",
				    RNG_MAX_BUSY_ATTEMPTS);
				return (rv);
			}
			delay(RNG_RETRY_BUSY_DELAY);
		}
	} while (hverr == H_EWOULDBLOCK);

	/*
	 * If the attempt fails with EPERM, the driver is not running
	 * in the control domain.
	 */
	if (rv == EPERM) {
		DBG0(n2rng, DATTACH,
		    "n2rng_init_ctl: Running in guest domain");
		return (DDI_SUCCESS);
	}

	/* Allocate the control structure, used only in the control domain */
	n2rng->n_ctl_data = kmem_alloc(sizeof (rng_ctl_data_t), KM_SLEEP);
	n2rng->n_ctl_data->n_num_rngs_online = 0;

	/*
	 * If running with an API version less than 2.0, default to one
	 * rng.  Otherwise get the number of rngs from device properties.
	 */
	if (n2rng->n_hvapi_major_version < 2) {
		n2rng->n_ctl_data->n_num_rngs = 1;
	} else {
		n2rng->n_ctl_data->n_num_rngs =
		    ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
		    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
		    N2RNG_PROP_NUM_UNITS, 0);
		if (n2rng->n_ctl_data->n_num_rngs == 0) {
			cmn_err(CE_WARN, "n2rng: %s property not found",
			    N2RNG_PROP_NUM_UNITS);
			return (DDI_FAILURE);
		}
	}

	/* Allocate space for all rng entries */
	n2rng->n_ctl_data->n_rngs =
	    kmem_zalloc(n2rng->n_ctl_data->n_num_rngs *
	    sizeof (rng_entry_t), KM_SLEEP);

	/* Get accumulate cycles from .conf file. */
	n2rng->n_ctl_data->n_accumulate_cycles =
	    ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "acc_cycles",
	    RNG_DEFAULT_ACCUMULATE_CYCLES);

	/* Get health check frequency from .conf file */
	n2rng->n_ctl_data->n_hc_secs = ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "hc_seconds",
	    RNG_DEFAULT_HC_SECS);

	/* Get FIPS configuration: B_FALSE by default */
	n2rng->n_is_fips = ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    N2RNG_FIPS_STRING, B_FALSE);

	/* API versions prior to 2.0 do not support health checks */
	if ((n2rng->n_hvapi_major_version < 2) &&
	    (n2rng->n_ctl_data->n_hc_secs > 0)) {
		cmn_err(CE_WARN, "n2rng: Hypervisor api "
		    "version %d.%d does not support health checks",
		    n2rng->n_hvapi_major_version,
		    n2rng->n_hvapi_minor_version);
		n2rng->n_ctl_data->n_hc_secs = 0;
	}

	if (n2rng->n_is_fips == B_TRUE) {
		/* When in FIPS mode, run the module integrity test */
		if (fips_check_module("drv/n2rng", (void *)_init) != 0) {
			cmn_err(CE_WARN, "n2rng: FIPS Software Integrity Test "
			    "failed\n");
			return (DDI_FAILURE);
		}
	}

	/* Calculate watchdog timeout value */
	if (n2rng->n_ctl_data->n_hc_secs <= 0) {
		n2rng->n_ctl_data->n_watchdog_cycles = 0;
	} else {
		n2rng->n_ctl_data->n_watchdog_cycles =
		    ((uint64_t)(RNG_EXTRA_WATCHDOG_SECS) +
		    n2rng->n_ctl_data->n_hc_secs) *
		    n2rng->n_sticks_per_usec * 1000000;
	}
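
	/*
	 * Worked example with illustrative numbers: if n_hc_secs is 30,
	 * RNG_EXTRA_WATCHDOG_SECS adds (say) another 60 seconds, and
	 * n_sticks_per_usec is 1000, the watchdog fires after
	 * (60 + 30) * 1000 * 1000000 = 9e10 stick cycles.
	 */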

	/*
	 * Set some plausible state into the preferred configuration.
	 * The intent is that the health check will immediately overwrite it.
	 */
	for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs; rngid++) {

		rng = &n2rng->n_ctl_data->n_rngs[rngid];

		rng->n_preferred_config.ctlwds[0].word = 0;
		rng->n_preferred_config.ctlwds[0].fields.rnc_anlg_sel =
		    N2RNG_NOANALOGOUT;
		rng->n_preferred_config.ctlwds[0].fields.rnc_cnt =
		    RNG_DEFAULT_ACCUMULATE_CYCLES;
		rng->n_preferred_config.ctlwds[0].fields.rnc_mode =
		    RNG_MODE_NORMAL;
		rng->n_preferred_config.ctlwds[1].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[2].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[3].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[0].fields.rnc_vcoctl = 1;
		rng->n_preferred_config.ctlwds[0].fields.rnc_selbits = 1;
		rng->n_preferred_config.ctlwds[1].fields.rnc_vcoctl = 2;
		rng->n_preferred_config.ctlwds[1].fields.rnc_selbits = 2;
		rng->n_preferred_config.ctlwds[2].fields.rnc_vcoctl = 3;
		rng->n_preferred_config.ctlwds[2].fields.rnc_selbits = 4;
		rng->n_preferred_config.ctlwds[3].fields.rnc_vcoctl = 0;
		rng->n_preferred_config.ctlwds[3].fields.rnc_selbits = 7;
	}
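
	/*
	 * The four control words differ only in rnc_vcoctl and
	 * rnc_selbits, which appear to select different oscillator
	 * tunings and noise-cell combinations, giving the health check
	 * several independent configurations to exercise.
	 */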

	n2rng_setcontrol(n2rng);
	DBG2(n2rng, DATTACH,
	    "n2rng_init_ctl: Running in control domain with %d rng device%s",
	    n2rng->n_ctl_data->n_num_rngs,
	    (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s");
	DBG2(n2rng, DCFG,
	    "n2rng_init_ctl: n_sticks_per_usec = %ld, n_hc_secs = %d",
	    n2rng->n_sticks_per_usec,
	    n2rng->n_ctl_data->n_hc_secs);
	DBG2(n2rng, DCFG,
	    "n2rng_init_ctl: n_watchdog_cycles = %ld, "
	    "n_accumulate_cycles = %ld", n2rng->n_ctl_data->n_watchdog_cycles,
	    n2rng->n_ctl_data->n_accumulate_cycles);

	return (DDI_SUCCESS);
}

static void
n2rng_uninit_ctl(n2rng_t *n2rng)
{
	if (n2rng->n_ctl_data) {
		if (n2rng->n_ctl_data->n_num_rngs) {
			kmem_free(n2rng->n_ctl_data->n_rngs,
			    n2rng->n_ctl_data->n_num_rngs *
			    sizeof (rng_entry_t));
			n2rng->n_ctl_data->n_rngs = NULL;
			n2rng->n_ctl_data->n_num_rngs = 0;
		}
		kmem_free(n2rng->n_ctl_data, sizeof (rng_ctl_data_t));
		n2rng->n_ctl_data = NULL;
	}
}


/*
 * n2rng_config_test()
 *
 * Attempt to read random data to see if the rng is configured.
 */
int
n2rng_config_test(n2rng_t *n2rng)
{
	int		rv = 0;
	uint64_t	hverr;
	uint64_t	randval = 0;
	uint64_t	randvalphys = va_to_pa(&randval);
	uint64_t	tdelta;
	int		failcount = 0;
	int		blockcount = 0;
	boolean_t	rnglooping = B_TRUE;

	while (rnglooping) {
		hverr = hv_rng_data_read(randvalphys, &tdelta);
		rv = n2rng_herr2kerr(hverr);
		switch (hverr) {
		case H_EOK:
			failcount = 0;
			rnglooping = B_FALSE;
			break;
		case H_EIO:
			/*
			 * A health check is in progress.
			 * Wait RNG_RETRY_HLCHK_USECS and fail
			 * after RNG_MAX_DATA_READ_ATTEMPTS
			 * failures.
			 */
			if (++failcount > RNG_MAX_DATA_READ_ATTEMPTS) {
				goto exitpoint;
			} else {
				delay(drv_usectohz(RNG_RETRY_HLCHK_USECS));
			}
			break;
		case H_EWOULDBLOCK:
			/* Data currently not available, try again */
			if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_config_test: "
				    "exceeded block count of %d",
				    RNG_MAX_BLOCK_ATTEMPTS);
				goto exitpoint;
			} else {
				cyclesleep(n2rng, tdelta);
			}
			break;
		case H_ENOACCESS:
			/* An rng error has occurred during health check */
			goto exitpoint;
		default:
			log_internal_errors(hverr, "hv_rng_data_read");
			goto exitpoint;
		}
	} /* while */

exitpoint:
	return (rv);
}

/*
 * n2rng_config()
 *
 * Run health check on the RNG hardware
 * Configure the RNG hardware
 * Register with crypto framework
 */
static int
n2rng_config(n2rng_t *n2rng)
{
	int		rv;
	rng_entry_t	*rng;
	int		rngid;

	/*
	 * Run health checks and configure rngs if running in control domain,
	 * otherwise just check if at least one rng is available.
	 */
	if (n2rng_iscontrol(n2rng)) {

		for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs;
		    rngid++) {

			rng = &n2rng->n_ctl_data->n_rngs[rngid];

			/* Only test rngs that have not already failed */
			if (rng->n_rng_state == CTL_STATE_ERROR) {
				continue;
			}

			if ((n2rng->n_binding == N2RNG_CPU_VF) &&
			    (n2rng->n_hvapi_major_version < 2)) {
				/*
				 * Since api versions prior to 2.0 do not
				 * support multiple rngs, bind to the current
				 * processor for the entire health check
				 * process.
				 */
				thread_affinity_set(curthread, CPU_CURRENT);
				DBG1(n2rng, DCFG, "n2rng_config: "
				    "Configuring single rng from cpu %d",
				    CPU->cpu_id);
				rv = n2rng_do_health_check(n2rng, rngid);
				thread_affinity_clear(curthread);
			} else {
				rv = n2rng_do_health_check(n2rng, rngid);
			}

			switch (rv) {
			case 0:
				/*
				 * Successful, increment online count if
				 * necessary
				 */
				DBG1(n2rng, DCFG, "n2rng_config: rng(%d) "
				    "passed health checks", rngid);
				if (rng->n_rng_state != CTL_STATE_CONFIGURED) {
					rng->n_rng_state =
					    CTL_STATE_CONFIGURED;
					n2rng->n_ctl_data->n_num_rngs_online++;
				}
				break;
			default:
				/*
				 * Health checks failed, decrement online
				 * count if necessary
				 */
				cmn_err(CE_WARN, "n2rng: rng(%d) "
				    "failed health checks", rngid);
				if (rng->n_rng_state == CTL_STATE_CONFIGURED) {
					n2rng->n_ctl_data->n_num_rngs_online--;
				}
				rng->n_rng_state = CTL_STATE_ERROR;
				break;
			}
		}
		DBG2(n2rng, DCFG, "n2rng_config: %d rng%s online",
		    n2rng->n_ctl_data->n_num_rngs_online,
		    (n2rng->n_ctl_data->n_num_rngs_online == 1) ? "" : "s");

		/* Check if all rngs have failed */
		if (n2rng->n_ctl_data->n_num_rngs_online == 0) {
			cmn_err(CE_WARN, "n2rng: %d RNG device%s failed",
			    n2rng->n_ctl_data->n_num_rngs,
			    (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s");
			goto errorexit;
		} else {
			n2rng_setconfigured(n2rng);
		}
	} else {
		/* Running in guest domain, just check if rng is configured */
		rv = n2rng_config_test(n2rng);
		switch (rv) {
		case 0:
			n2rng_setconfigured(n2rng);
			break;
		case EIO:
			/* Don't set configured to force a retry */
			break;
		default:
			goto errorexit;
		}
	}

	/*
	 * Initialize FIPS state and register with KCF if we have at least one
	 * RNG configured.  Otherwise schedule a retry if all rngs have not
	 * failed.
	 */
	if (n2rng_isconfigured(n2rng)) {

		if (n2rng_init(n2rng) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "n2rng: unable to register with KCF");
			goto errorexit;
		}

		/*
		 * Schedule a retry if running in the control domain and a
		 * health check time has been specified.
		 */
		if (n2rng_iscontrol(n2rng) &&
		    (n2rng->n_ctl_data->n_hc_secs > 0)) {
			n2rng_config_retry(n2rng,
			    n2rng->n_ctl_data->n_hc_secs);
		}
	} else if (!n2rng_isfailed(n2rng)) {
		/* Schedule a retry if one is not already pending */
		n2rng_config_retry(n2rng, RNG_CFG_RETRY_SECS);
	}
	return (DDI_SUCCESS);

errorexit:
	/* Unregister from KCF if we are registered */
	(void) n2rng_unregister_provider(n2rng);
	n2rng_setfailed(n2rng);
	cmn_err(CE_WARN, "n2rng: hardware failure detected");
	return (DDI_FAILURE);
}

/*
 * n2rng_config_task()
 *
 * Call n2rng_config() from the task queue or after a timeout, ignore result.
 */
static void
n2rng_config_task(void *targ)
{
	n2rng_t *n2rng = (n2rng_t *)targ;

	mutex_enter(&n2rng->n_lock);
	n2rng->n_timeout_id = 0;
	mutex_exit(&n2rng->n_lock);
	(void) n2rng_config(n2rng);
}
1395