/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Niagara 2 Random Number Generator (RNG) driver
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <sys/ksynch.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/param.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/hsvc.h>
#include <sys/machsystm.h>
#include <sys/hypervisor_api.h>
#include <sys/n2rng.h>

static int	n2rng_attach(dev_info_t *, ddi_attach_cmd_t);
static int	n2rng_detach(dev_info_t *, ddi_detach_cmd_t);
static int	n2rng_suspend(n2rng_t *);
static int	n2rng_resume(n2rng_t *);
int	n2rng_herr2kerr(uint64_t);
int	n2rng_logic_test(n2rng_t *);
int	n2rng_noise_gen_test_set(void);
int	n2rng_init(n2rng_t *n2rng);
int	n2rng_uninit(n2rng_t *n2rng);

static uint64_t sticks_per_usec(void);
u_longlong_t gettick(void);

static void n2rng_config_task(void *targ);

/*
 * Device operations.
 */

static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	n2rng_attach,		/* devo_attach */
	n2rng_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power		/* devo_power */
};

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
	&mod_driverops,			/* drv_modops */
	"N2 RNG Driver v%I%",		/* drv_linkinfo */
	&devops,			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev */
	&modldrv,			/* ml_linkage */
	NULL
};

/*
 * Driver globals: soft state.
 */
static void	*n2rng_softstate = NULL;

/*
 * Hypervisor RNG information.
 */
static uint64_t	rng_min_ver;	/* negotiated RNG API minor version */
static boolean_t rng_hsvc_available = B_FALSE;

static hsvc_info_t rng_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_RNG, RNG_MAJOR_VER,
	RNG_MINOR_VER, "n2rng"
};

/*
 * DDI entry points.
 */
int
_init(void)
{
	int	rv;

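	/* hint the framework to pre-allocate state for the one allowed instance */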
	rv = ddi_soft_state_init(&n2rng_softstate, sizeof (n2rng_t), 1);
	if (rv != 0) {
		/* this should *never* happen! */
		return (rv);
	}

	if ((rv = mod_install(&modlinkage)) != 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
		return (rv);
	}

	return (0);
}

int
_fini(void)
{
	int	rv;

	rv = mod_remove(&modlinkage);
	if (rv == 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
	}

	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static int
n2rng_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	n2rng_t		*n2rng = NULL;
	int		instance;
	int		rv;

	instance = ddi_get_instance(dip);
	DBG1(NULL, DATTACH, "n2rng_attach called, instance %d", instance);
	/*
	 * Only instance 0 of the n2rng driver is allowed.
	 */
	if (instance != 0) {
		n2rng_diperror(dip, "only one instance (0) allowed");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_RESUME:
		n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate,
		    instance);
		if (n2rng == NULL) {
			n2rng_diperror(dip, "no soft state in attach");
			return (DDI_FAILURE);
		}
		return (n2rng_resume(n2rng));

	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	rv = ddi_soft_state_zalloc(n2rng_softstate, instance);
	if (rv != DDI_SUCCESS) {
		n2rng_diperror(dip, "unable to allocate soft state");
		return (DDI_FAILURE);
	}
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	ASSERT(n2rng != NULL);
	n2rng->n_dip = dip;

	mutex_init(&n2rng->n_health_check_mutex, NULL, MUTEX_DRIVER, NULL);

	if ((rv = hsvc_register(&rng_hsvc, &rng_min_ver)) != 0) {
		cmn_err(CE_WARN, "%s: cannot negotiate hypervisor services "
		    "group: 0x%lx major: 0x%lx minor: 0x%lx errno: %d",
		    rng_hsvc.hsvc_modname, rng_hsvc.hsvc_group,
		    rng_hsvc.hsvc_major, rng_hsvc.hsvc_minor, rv);
		/* destroy the mutex before freeing the soft state holding it */
		mutex_destroy(&n2rng->n_health_check_mutex);
		ddi_soft_state_free(n2rng_softstate, instance);
		return (DDI_FAILURE);
	}
	rng_hsvc_available = B_TRUE;

	/* Allocate single thread task queue for rng diags and registration */
	n2rng->n_taskq = ddi_taskq_create(dip, "n2rng_taskq", 1,
	    TASKQ_DEFAULTPRI, 0);

	if (n2rng->n_taskq == NULL) {
		n2rng_diperror(dip, "ddi_taskq_create() failed");
		goto errorexit;
	}

	/* No locking needed yet; the config task has not been dispatched */
	n2rng->n_sticks_per_usec = sticks_per_usec();
	/*
	 * The first product will likely be around 4 billion, so we
	 * use uint64_t to avoid integer overflow.
	 */
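	/*
	 * RNG_CTL_SETTLE_NS is in nanoseconds and n_sticks_per_usec is in
	 * cycles per microsecond, so dividing the product by 1000 yields
	 * cycles.
	 */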
	n2rng->n_anlg_settle_cycles = (uint64_t)RNG_CTL_SETTLE_NS *
	    n2rng->n_sticks_per_usec / 1000;

	/*
	 * Set some plausible state into the preferred
	 * configuration. The intent is that the health check, below,
	 * will immediately overwrite it.  If we are not in a control
	 * domain, this stuff will have no effect.
	 */
	n2rng->n_preferred_config.ctlwds[0].word = 0;
	n2rng->n_preferred_config.ctlwds[0].fields.rnc_anlg_sel =
	    N2RNG_NOANALOGOUT;
	n2rng->n_preferred_config.ctlwds[0].fields.rnc_cnt =
	    RNG_DEFAULT_ACCUMULATE_CYCLES;
	n2rng->n_preferred_config.ctlwds[0].fields.rnc_mode =
	    RNG_MODE_NORMAL;
	n2rng->n_preferred_config.ctlwds[1].word =
	    n2rng->n_preferred_config.ctlwds[0].word;
	n2rng->n_preferred_config.ctlwds[2].word =
	    n2rng->n_preferred_config.ctlwds[0].word;
	n2rng->n_preferred_config.ctlwds[3].word =
	    n2rng->n_preferred_config.ctlwds[0].word;
	n2rng->n_preferred_config.ctlwds[0].fields.rnc_vcoctl = 1;
	n2rng->n_preferred_config.ctlwds[0].fields.rnc_selbits = 1;
	n2rng->n_preferred_config.ctlwds[1].fields.rnc_vcoctl = 2;
	n2rng->n_preferred_config.ctlwds[1].fields.rnc_selbits = 2;
	n2rng->n_preferred_config.ctlwds[2].fields.rnc_vcoctl = 3;
	n2rng->n_preferred_config.ctlwds[2].fields.rnc_selbits = 4;
	n2rng->n_preferred_config.ctlwds[3].fields.rnc_vcoctl = 0;
	n2rng->n_preferred_config.ctlwds[3].fields.rnc_selbits = 7;

	/* Dispatch task to configure the RNG and register with KCF */
	if (ddi_taskq_dispatch(n2rng->n_taskq, n2rng_config_task,
	    (void *)n2rng, DDI_SLEEP) != DDI_SUCCESS) {
		n2rng_diperror(dip, "ddi_taskq_dispatch() failed");
		goto errorexit;
	}

	return (DDI_SUCCESS);

errorexit:
	if (rng_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&rng_hsvc);
		rng_hsvc_available = B_FALSE;
	}

	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	mutex_destroy(&n2rng->n_health_check_mutex);
	ddi_soft_state_free(n2rng_softstate, instance);

	return (DDI_FAILURE);
}

static int
n2rng_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	int		rv;
	n2rng_t		*n2rng;

	instance = ddi_get_instance(dip);
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	if (n2rng == NULL) {
		n2rng_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_SUSPEND:
		return (n2rng_suspend(n2rng));
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/* unregister with KCF---also tears down FIPS state */
	rv = n2rng_uninit(n2rng) ? DDI_FAILURE : DDI_SUCCESS;

	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	if (rng_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&rng_hsvc);
		rng_hsvc_available = B_FALSE;
	}

	mutex_destroy(&n2rng->n_health_check_mutex);

	ddi_soft_state_free(n2rng_softstate, instance);

	return (rv);
}

/*ARGSUSED*/
static int
n2rng_suspend(n2rng_t *n2rng)
{
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
n2rng_resume(n2rng_t *n2rng)
{
	int		rv;

	/* assume clock is same speed, all data structures intact.  */
	rv = n2rng_do_health_check(n2rng);
	switch (rv) {
	case 0:
	case EPERM:
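		/* EPERM means we are not the control domain; nothing to do */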
		break;
	default:
		cmn_err(CE_WARN, "n2rng: n2rng_resume: health check failed. "
		    "Unregistering from encryption framework");
		n2rng->n_flags |= N2RNG_FAILED;
		(void) n2rng_uninit(n2rng);
		break;
	}

	return (DDI_SUCCESS);
}

/*
 * Map hypervisor error codes to Solaris error codes.  Only
 * H_ENORADDR, H_EBADALIGN, H_EWOULDBLOCK, H_ENOACCESS, and H_EIO
 * are meaningful to this device; any other error code is mapped
 * to EINVAL.
 */
int
n2rng_herr2kerr(uint64_t hv_errcode)
{
	int	s_errcode;

	switch (hv_errcode) {
	case H_EWOULDBLOCK:
		s_errcode = EWOULDBLOCK;
		break;
	case H_ENORADDR:
	case H_EBADALIGN:
	case H_EIO:
		s_errcode = EIO;
		break;
	case H_EOK:
		s_errcode = 0;
		break;
	case H_ENOACCESS:
		s_errcode = EPERM;
		break;
	default:
		s_errcode = EINVAL;
		break;
	}
	return (s_errcode);
}

/*
 * Waits approximately delay_sticks counts of the stick register.
 * Times shorter than one sys clock tick (10ms on most systems) are
 * done by busy waiting.
 */
void
cyclesleep(n2rng_t *n2rng, uint64_t delay_sticks)
{
	uint64_t	end_stick = gettick() + delay_sticks;
	int64_t		sticks_to_wait;
	clock_t		sys_ticks_to_wait;
	clock_t		usecs_to_wait;

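	/* sleep in whole clock ticks when possible, busy-wait the remainder */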
	/*CONSTCOND*/
	while (1) {
		sticks_to_wait = end_stick - gettick();
		if (sticks_to_wait <= 0) {
			return;
		}

		usecs_to_wait = sticks_to_wait / n2rng->n_sticks_per_usec;
		sys_ticks_to_wait = drv_usectohz(usecs_to_wait);

		if (sys_ticks_to_wait > 0) {
			/* sleep */
			delay(sys_ticks_to_wait);
		} else if (usecs_to_wait > 0) {
			/* busy wait */
			drv_usecwait(usecs_to_wait);
		}
	}
}

static void
log_internal_errors(uint64_t hverr, char *fname)
{
	switch (hverr) {
	case H_EBADALIGN:
		cmn_err(CE_WARN, "n2rng: internal alignment problem");
		break;
	case H_ENORADDR:
		cmn_err(CE_WARN, "n2rng: internal invalid address");
		break;
	default:
		cmn_err(CE_NOTE, "n2rng: %s unexpectedly returned hverr %ld",
		    fname, hverr);
		break;
	}
}

/*
 * Collects a buffer full of bits, using the specified setup. numbytes
 * must be a multiple of 8. If a sub-operation fails with EIO (handle
 * mismatch), returns EIO.  If collect_setupp is NULL, the current
 * setup is used.  If exit_setupp is NULL, the control configuration
 * and state are not set at exit.  WARNING: the buffer must be 8-byte
 * aligned and in contiguous physical addresses.  Contiguousness is
 * not checked!
 */
int
n2rng_collect_diag_bits(n2rng_t *n2rng, n2rng_setup_t *collect_setupp,
    void *buffer, int numbytes, n2rng_setup_t *exit_setupp,
    uint64_t exitstate)
{
	int		rv;
	int		override_rv = 0;
	uint64_t	hverr;
	int		i;
	uint64_t	tdelta;
	n2rng_setup_t	setupbuffer[2];
	n2rng_setup_t	*setupcontigp;
	uint64_t	setupphys;
	int		numchunks;
	boolean_t	rnglooping;

	if (numbytes % sizeof (uint64_t)) {
		return (EINVAL);
	}

	if ((uint64_t)buffer % sizeof (uint64_t) != 0) {
		return (EINVAL);
	}

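	/* round up to whole RNG_DIAG_CHUNK_SIZE-word transfers */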
	numchunks = ((numbytes / sizeof (uint64_t)) + RNG_DIAG_CHUNK_SIZE - 1)
	    / RNG_DIAG_CHUNK_SIZE;
	/*
	 * Use setupbuffer[0] if it is contiguous, otherwise
	 * setupbuffer[1].
	 */
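	/*
	 * (Two adjacent copies are declared so that at least one of them
	 * does not straddle a page boundary, assuming the structure is
	 * smaller than a page; that copy is physically contiguous.)
	 */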
	setupcontigp = &setupbuffer[
	    CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1];
	setupphys = va_to_pa(setupcontigp);

	/*
	 * If a non-null collect_setupp pointer has been provided,
	 * push the specified setup into the hardware.
	 */
	if (collect_setupp != NULL) {
		/* copy the specified state to the aligned buffer */
		*setupcontigp = *collect_setupp;
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = hv_rng_ctl_write(setupphys,
			    CTL_STATE_HEALTHCHECK,
			    n2rng->n_anlg_settle_cycles, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case 0:
				rnglooping = B_FALSE;
				break;
			case H_EIO: /* control yanked from us */
			case H_ENOACCESS: /* We are not control domain */
				return (rv);
			case H_EWOULDBLOCK:
				cyclesleep(n2rng, tdelta);
				break;
			default:
				log_internal_errors(hverr, "hv_rng_ctl_write");
				override_rv = rv;
				goto restore_state;
			}
		} /* while (rnglooping) */
	} /* if (collect_setupp != NULL) */

	/* If the caller asks for some bytes, collect the data */
	if (numbytes > 0) {
		for (i = 0; i < numchunks; i++) {
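			/* the final chunk may be shorter than a full chunk */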
			size_t thisnumbytes = (i == numchunks - 1) ?
			    numbytes - i * (RNG_DIAG_CHUNK_SIZE *
			    sizeof (uint64_t)) :
			    RNG_DIAG_CHUNK_SIZE * sizeof (uint64_t);
			/* try until we successfully read a chunk of data */
			rnglooping = B_TRUE;
			while (rnglooping) {
				hverr = hv_rng_data_read_diag(
				    va_to_pa((uint64_t *)buffer +
				    RNG_DIAG_CHUNK_SIZE * i),
				    thisnumbytes, &tdelta);
				rv = n2rng_herr2kerr(hverr);
				switch (hverr) {
				case 0:
					rnglooping = B_FALSE;
					break;
				case H_EIO:
				case H_ENOACCESS:
					return (rv);
				case H_EWOULDBLOCK:
					cyclesleep(n2rng, tdelta);
					break;
				default:
					log_internal_errors(hverr,
					    "hv_rng_data_read_diag");
					override_rv = rv;
					goto restore_state;
				}
			} /* while (rnglooping) */
		} /* for */
	} /* if */

restore_state:

	/* restore the preferred configuration and set exit state */
	if (exit_setupp != NULL) {

		*setupcontigp = *exit_setupp;
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = hv_rng_ctl_write(setupphys, exitstate,
			    n2rng->n_anlg_settle_cycles, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case 0:
			case H_EIO: /* control yanked from us */
			case H_EINVAL: /* some external error, probably */
			case H_ENOACCESS: /* We are not control domain */
				rnglooping = B_FALSE;
				break;
			case H_EWOULDBLOCK:
				cyclesleep(n2rng, tdelta);
				break;

			default:
				rnglooping = B_FALSE;
				log_internal_errors(hverr, "hv_rng_ctl_write");
				break;
			}
		} /* while */
	} /* if */

	/*
	 * override_rv takes care of the case where we abort because
	 * of some error, but still want to restore the preferred state
	 * and return the first error, even if other errors occur.
	 */
	return (override_rv ? override_rv : rv);
}

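/*
 * Fills the buffer with entropy by reading one 64-bit word at a time
 * from the hypervisor into a physically addressed local, then copying
 * it out.  Any partial word at the end of the buffer is left unfilled.
 */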
int
n2rng_getentropy(n2rng_t *n2rng, void *buffer, size_t size)
{
	int		i, rv = 0;  /* so it works if size is zero */
	uint64_t	hverr;
	uint64_t	*buffer_w = (uint64_t *)buffer;
	int		num_w = size / sizeof (uint64_t);
	uint64_t	randval;
	uint64_t	randvalphys = va_to_pa(&randval);
	uint64_t	tdelta;
	int		failcount = 0;
	boolean_t	rnglooping;

	for (i = 0; i < num_w; i++) {
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = hv_rng_data_read(randvalphys, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
				buffer_w[i] = randval;
				failcount = 0;
				rnglooping = B_FALSE;
				break;
			case H_EIO:
				/*
				 * A health check is in progress.
				 * Wait RNG_RETRY_HLCHK_USECS and fail
				 * after RNG_MAX_DATA_READ_ATTEMPTS
				 * failures.
				 */
				if (++failcount > RNG_MAX_DATA_READ_ATTEMPTS) {
					goto exitpoint;
				} else {
					delay(drv_usectohz(
					    RNG_RETRY_HLCHK_USECS));
				}
				break;
			case H_EWOULDBLOCK:
				cyclesleep(n2rng, tdelta);
				break;
			default:
				log_internal_errors(hverr, "hv_rng_data_read");
				goto exitpoint;
			}
		} /* while */
	} /* for */

exitpoint:

	return (rv);
}

static uint64_t
sticks_per_usec(void)
{
	uint64_t starttick = gettick();
	hrtime_t starttime = gethrtime();
	uint64_t endtick;
	hrtime_t endtime;

	delay(2);

	endtick = gettick();
	endtime = gethrtime();

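	/*
	 * gethrtime() is in nanoseconds, so stick cycles per microsecond is
	 * (cycles elapsed) * 1000 / (nanoseconds elapsed).
	 */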
	return ((1000 * (endtick - starttick)) / (endtime - starttime));
}

/*
 * n2rng_config_task()
 *
 * Runs health checks on the RNG hardware
 * Configures the RNG hardware
 * Registers with crypto framework if successful.
 */
static void
n2rng_config_task(void *targ)
{
	int		rv;
	n2rng_t		*n2rng = (n2rng_t *)targ;

	thread_affinity_set(curthread, CPU_CURRENT);
	rv = n2rng_do_health_check(n2rng);
	thread_affinity_clear(curthread);

	switch (rv) {
	case 0:
		/* We are a control domain.  Success. */
		break;
	case EPERM:
		/* We must not be a control domain, declare success. */
		rv = 0;
		break;
	default:
		goto errorexit;
	}

	/* Register with KCF and initialize FIPS state */
	rv = n2rng_init(n2rng);
	if (rv != DDI_SUCCESS) {
		goto errorexit;
	}

	n2rng->n_flags &= ~N2RNG_FAILED;
	return;

errorexit:
	cmn_err(CE_WARN, "n2rng_config_task: RNG configuration failed");
	n2rng->n_flags |= N2RNG_FAILED;
}
706