1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * Niagara 2 Random Number Generator (RNG) driver 30 */ 31 32 #include <sys/types.h> 33 #include <sys/sysmacros.h> 34 #include <sys/modctl.h> 35 #include <sys/conf.h> 36 #include <sys/devops.h> 37 #include <sys/cmn_err.h> 38 #include <sys/ksynch.h> 39 #include <sys/kmem.h> 40 #include <sys/stat.h> 41 #include <sys/open.h> 42 #include <sys/file.h> 43 #include <sys/ddi.h> 44 #include <sys/sunddi.h> 45 #include <sys/param.h> 46 #include <sys/cpuvar.h> 47 #include <sys/disp.h> 48 #include <sys/hsvc.h> 49 #include <sys/machsystm.h> 50 #include <sys/hypervisor_api.h> 51 #include <sys/n2rng.h> 52 53 static int n2rng_attach(dev_info_t *, ddi_attach_cmd_t); 54 static int n2rng_detach(dev_info_t *, ddi_detach_cmd_t); 55 static int n2rng_suspend(n2rng_t *); 56 static int n2rng_resume(n2rng_t *); 57 static uint64_t sticks_per_usec(void); 58 u_longlong_t gettick(void); 59 static int n2rng_init_ctl(n2rng_t *); 60 static void n2rng_uninit_ctl(n2rng_t *); 61 static int n2rng_config(n2rng_t *); 62 static void n2rng_config_task(void * targ); 63 64 /* 65 * Device operations. 66 */ 67 68 static struct dev_ops devops = { 69 DEVO_REV, /* devo_rev */ 70 0, /* devo_refcnt */ 71 nodev, /* devo_getinfo */ 72 nulldev, /* devo_identify */ 73 nulldev, /* devo_probe */ 74 n2rng_attach, /* devo_attach */ 75 n2rng_detach, /* devo_detach */ 76 nodev, /* devo_reset */ 77 NULL, /* devo_cb_ops */ 78 NULL, /* devo_bus_ops */ 79 ddi_power /* devo_power */ 80 }; 81 82 /* 83 * Module linkage. 84 */ 85 static struct modldrv modldrv = { 86 &mod_driverops, /* drv_modops */ 87 "N2 RNG Driver v%I%", /* drv_linkinfo */ 88 &devops, /* drv_dev_ops */ 89 }; 90 91 static struct modlinkage modlinkage = { 92 MODREV_1, /* ml_rev */ 93 &modldrv, /* ml_linkage */ 94 NULL 95 }; 96 97 /* 98 * Driver globals Soft state. 99 */ 100 static void *n2rng_softstate = NULL; 101 102 /* 103 * Hypervisor NCS services information. 104 */ 105 static boolean_t ncs_hsvc_available = B_FALSE; 106 107 #define NVERSIONS 2 108 109 /* 110 * HV API versions supported by this driver. 111 */ 112 static hsvc_info_t ncs_hsvc[NVERSIONS] = { 113 { HSVC_REV_1, NULL, HSVC_GROUP_RNG, 2, 0, DRIVER }, /* v2.0 */ 114 { HSVC_REV_1, NULL, HSVC_GROUP_RNG, 1, 0, DRIVER }, /* v1.0 */ 115 }; 116 int ncs_version_index; /* index into ncs_hsvc[] */ 117 118 /* 119 * DDI entry points. 120 */ 121 int 122 _init(void) 123 { 124 int rv; 125 126 rv = ddi_soft_state_init(&n2rng_softstate, sizeof (n2rng_t), 1); 127 if (rv != 0) { 128 /* this should *never* happen! */ 129 return (rv); 130 } 131 132 if ((rv = mod_install(&modlinkage)) != 0) { 133 /* cleanup here */ 134 ddi_soft_state_fini(&n2rng_softstate); 135 return (rv); 136 } 137 138 return (0); 139 } 140 141 int 142 _fini(void) 143 { 144 int rv; 145 146 rv = mod_remove(&modlinkage); 147 if (rv == 0) { 148 /* cleanup here */ 149 ddi_soft_state_fini(&n2rng_softstate); 150 } 151 152 return (rv); 153 } 154 155 int 156 _info(struct modinfo *modinfop) 157 { 158 return (mod_info(&modlinkage, modinfop)); 159 } 160 161 static int 162 n2rng_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 163 { 164 n2rng_t *n2rng = NULL; 165 int instance; 166 int rv; 167 int version; 168 uint64_t ncs_minor_ver; 169 170 instance = ddi_get_instance(dip); 171 DBG1(NULL, DENTRY, "n2rng_attach called, instance %d", instance); 172 /* 173 * Only instance 0 of n2rng driver is allowed. 174 */ 175 if (instance != 0) { 176 n2rng_diperror(dip, "only one instance (0) allowed"); 177 return (DDI_FAILURE); 178 } 179 180 switch (cmd) { 181 case DDI_RESUME: 182 n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, 183 instance); 184 if (n2rng == NULL) { 185 n2rng_diperror(dip, "no soft state in attach"); 186 return (DDI_FAILURE); 187 } 188 return (n2rng_resume(n2rng)); 189 190 case DDI_ATTACH: 191 break; 192 default: 193 return (DDI_FAILURE); 194 } 195 196 rv = ddi_soft_state_zalloc(n2rng_softstate, instance); 197 if (rv != DDI_SUCCESS) { 198 n2rng_diperror(dip, "unable to allocate soft state"); 199 return (DDI_FAILURE); 200 } 201 n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance); 202 ASSERT(n2rng != NULL); 203 n2rng->n_dip = dip; 204 205 mutex_init(&n2rng->n_lock, NULL, MUTEX_DRIVER, NULL); 206 n2rng->n_flags = 0; 207 n2rng->n_timeout_id = 0; 208 n2rng->n_sticks_per_usec = sticks_per_usec(); 209 210 /* Determine binding type */ 211 n2rng->n_binding_name = ddi_binding_name(dip); 212 if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_N2, 213 strlen(N2RNG_BINDNAME_N2)) == 0) { 214 /* 215 * Niagara 2 216 */ 217 n2rng->n_binding = N2RNG_CPU_N2; 218 } else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_VF, 219 strlen(N2RNG_BINDNAME_VF)) == 0) { 220 /* 221 * Victoria Falls 222 */ 223 n2rng->n_binding = N2RNG_CPU_VF; 224 } else { 225 n2rng_diperror(dip, 226 "unable to determine n2rng (cpu) binding (%s)", 227 n2rng->n_binding_name); 228 goto errorexit; 229 } 230 DBG1(n2rng, DCHATTY, "n2rng_attach: n2rng->n_binding_name = %s", 231 n2rng->n_binding_name); 232 233 /* Negotiate HV api version number */ 234 for (version = 0; version < NVERSIONS; version++) { 235 rv = hsvc_register(&ncs_hsvc[version], &ncs_minor_ver); 236 if (rv == 0) 237 break; 238 239 DBG4(n2rng, DCHATTY, "n2rng_attach: grp: 0x%lx, maj: %ld, " 240 "min: %ld, errno: %d", ncs_hsvc[version].hsvc_group, 241 ncs_hsvc[version].hsvc_major, 242 ncs_hsvc[version].hsvc_minor, rv); 243 } 244 if (version == NVERSIONS) { 245 for (version = 0; version < NVERSIONS; version++) { 246 cmn_err(CE_WARN, 247 "%s: cannot negotiate hypervisor services " 248 "group: 0x%lx major: %ld minor: %ld errno: %d", 249 ncs_hsvc[version].hsvc_modname, 250 ncs_hsvc[version].hsvc_group, 251 ncs_hsvc[version].hsvc_major, 252 ncs_hsvc[version].hsvc_minor, rv); 253 } 254 goto errorexit; 255 } 256 ncs_version_index = version; 257 ncs_hsvc_available = B_TRUE; 258 DBG2(n2rng, DATTACH, "n2rng_attach: ncs api version (%ld.%ld)", 259 ncs_hsvc[ncs_version_index].hsvc_major, ncs_minor_ver); 260 n2rng->n_hvapi_major_version = ncs_hsvc[ncs_version_index].hsvc_major; 261 n2rng->n_hvapi_minor_version = (uint_t)ncs_minor_ver; 262 263 /* 264 * Verify that we are running version 2.0 or later api on multiple 265 * rng systems. 266 */ 267 if ((n2rng->n_binding != N2RNG_CPU_N2) && 268 (n2rng->n_hvapi_major_version < 2)) { 269 cmn_err(CE_NOTE, "n2rng: Incompatible hyperviser api " 270 "version %d.%d detected", n2rng->n_hvapi_major_version, 271 n2rng->n_hvapi_minor_version); 272 } 273 274 /* Initialize ctl structure if runnning in the control domain */ 275 if (n2rng_init_ctl(n2rng) != DDI_SUCCESS) { 276 cmn_err(CE_WARN, "n2rng: unable to initialize rng " 277 "control structures"); 278 goto errorexit; 279 } 280 281 /* Allocate single thread task queue for rng diags and registration */ 282 n2rng->n_taskq = ddi_taskq_create(dip, "n2rng_taskq", 1, 283 TASKQ_DEFAULTPRI, 0); 284 285 if (n2rng->n_taskq == NULL) { 286 n2rng_diperror(dip, "ddi_taskq_create() failed"); 287 goto errorexit; 288 } 289 290 /* Dispatch task to configure the RNG and register with KCF */ 291 if (ddi_taskq_dispatch(n2rng->n_taskq, n2rng_config_task, 292 (void *)n2rng, DDI_SLEEP) != DDI_SUCCESS) { 293 n2rng_diperror(dip, "ddi_taskq_dispatch() failed"); 294 goto errorexit; 295 } 296 297 return (DDI_SUCCESS); 298 299 errorexit: 300 /* Wait for pending config tasks to complete and delete the taskq */ 301 if (n2rng->n_taskq != NULL) { 302 ddi_taskq_destroy(n2rng->n_taskq); 303 n2rng->n_taskq = NULL; 304 } 305 306 n2rng_uninit_ctl(n2rng); 307 308 (void) n2rng_uninit(n2rng); 309 310 if (ncs_hsvc_available == B_TRUE) { 311 (void) hsvc_unregister(&ncs_hsvc[ncs_version_index]); 312 ncs_hsvc_available = B_FALSE; 313 } 314 315 mutex_destroy(&n2rng->n_lock); 316 ddi_soft_state_free(n2rng_softstate, instance); 317 318 return (DDI_FAILURE); 319 } 320 321 static int 322 n2rng_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 323 { 324 int instance; 325 int rv; 326 n2rng_t *n2rng; 327 timeout_id_t tid; 328 329 instance = ddi_get_instance(dip); 330 n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance); 331 if (n2rng == NULL) { 332 n2rng_diperror(dip, "no soft state in detach"); 333 return (DDI_FAILURE); 334 } 335 336 switch (cmd) { 337 case DDI_SUSPEND: 338 return (n2rng_suspend(n2rng)); 339 case DDI_DETACH: 340 break; 341 default: 342 return (DDI_FAILURE); 343 } 344 345 /* Destroy task queue first to insure configuration has completed */ 346 if (n2rng->n_taskq != NULL) { 347 ddi_taskq_destroy(n2rng->n_taskq); 348 n2rng->n_taskq = NULL; 349 } 350 351 /* Untimeout pending config retry operations */ 352 mutex_enter(&n2rng->n_lock); 353 tid = n2rng->n_timeout_id; 354 n2rng->n_timeout_id = 0; 355 mutex_exit(&n2rng->n_lock); 356 if (tid) { 357 DBG1(n2rng, DCHATTY, "n2rng_detach: untimeout pending retry " 358 "id = %x", tid); 359 (void) untimeout(tid); 360 } 361 362 n2rng_uninit_ctl(n2rng); 363 364 /* unregister with KCF---also tears down FIPS state */ 365 rv = n2rng_uninit(n2rng) ? DDI_FAILURE : DDI_SUCCESS; 366 367 if (ncs_hsvc_available == B_TRUE) { 368 (void) hsvc_unregister(&ncs_hsvc[ncs_version_index]); 369 ncs_hsvc_available = B_FALSE; 370 } 371 372 mutex_destroy(&n2rng->n_lock); 373 ddi_soft_state_free(n2rng_softstate, instance); 374 375 return (rv); 376 } 377 378 /*ARGSUSED*/ 379 static int 380 n2rng_suspend(n2rng_t *n2rng) 381 { 382 /* unregister with KCF---also tears down FIPS state */ 383 if (n2rng_uninit(n2rng) != DDI_SUCCESS) { 384 cmn_err(CE_WARN, "n2rng: unable to unregister from KCF"); 385 return (DDI_FAILURE); 386 } 387 388 return (DDI_SUCCESS); 389 } 390 391 /*ARGSUSED*/ 392 static int 393 n2rng_resume(n2rng_t *n2rng) 394 { 395 /* Assume clock is same speed and all data structures are intact */ 396 397 /* Re-configure the RNG hardware and register with KCF */ 398 return (n2rng_config(n2rng)); 399 } 400 401 /* 402 * Map hypervisor error code to solaris. Only 403 * H_ENORADDR, H_EBADALIGN, H_EWOULDBLOCK, and EIO 404 * are meaningful to this device. Any other error 405 * codes are mapped EINVAL. 406 */ 407 int 408 n2rng_herr2kerr(uint64_t hv_errcode) 409 { 410 int s_errcode; 411 412 switch (hv_errcode) { 413 case H_EWOULDBLOCK: 414 s_errcode = EWOULDBLOCK; 415 break; 416 case H_EIO: 417 s_errcode = EIO; 418 break; 419 case H_EBUSY: 420 s_errcode = EBUSY; 421 break; 422 case H_EOK: 423 s_errcode = 0; 424 break; 425 case H_ENOACCESS: 426 s_errcode = EPERM; 427 break; 428 case H_ENORADDR: 429 case H_EBADALIGN: 430 default: 431 s_errcode = EINVAL; 432 break; 433 } 434 return (s_errcode); 435 } 436 437 /* 438 * Waits approximately delay_sticks counts of the stick register. 439 * Times shorter than one sys clock tick (10ms on most systems) are 440 * done by busy waiting. 441 */ 442 void 443 cyclesleep(n2rng_t *n2rng, uint64_t delay_sticks) 444 { 445 uint64_t end_stick = gettick() + delay_sticks; 446 int64_t sticks_to_wait; 447 clock_t sys_ticks_to_wait; 448 clock_t usecs_to_wait; 449 450 /*CONSTCOND*/ 451 while (1) { 452 sticks_to_wait = end_stick - gettick(); 453 if (sticks_to_wait <= 0) { 454 return; 455 } 456 457 usecs_to_wait = sticks_to_wait / n2rng->n_sticks_per_usec; 458 sys_ticks_to_wait = drv_usectohz(usecs_to_wait); 459 460 if (sys_ticks_to_wait > 0) { 461 /* sleep */ 462 delay(sys_ticks_to_wait); 463 } else if (usecs_to_wait > 0) { 464 /* busy wait */ 465 drv_usecwait(usecs_to_wait); 466 } 467 } 468 } 469 470 static void 471 log_internal_errors(uint64_t hverr, char *fname) 472 { 473 switch (hverr) { 474 case H_EBADALIGN: 475 cmn_err(CE_WARN, 476 "n2rng: internal alignment " 477 "problem"); 478 break; 479 case H_ENORADDR: 480 cmn_err(CE_WARN, "n2rng: internal " 481 "invalid address"); 482 break; 483 case H_ENOACCESS: 484 cmn_err(CE_WARN, "n2rng: access failure"); 485 break; 486 case H_EWOULDBLOCK: 487 cmn_err(CE_WARN, "n2rng: hardware busy"); 488 break; 489 default: 490 cmn_err(CE_NOTE, 491 "n2rng: %s " 492 "unexpectedly " 493 "returned hverr %ld", fname, hverr); 494 break; 495 } 496 } 497 498 /* 499 * Collects a buffer full of bits, using the specified setup. numbytes 500 * must be a multiple of 8. If a sub-operation fails with EIO (handle 501 * mismatch), returns EIO. If collect_setupp is NULL, the current 502 * setup is used. If exit_setupp is NULL, the control configuratin 503 * and state are not set at exit. WARNING: the buffer must be 8-byte 504 * aligned and in contiguous physical addresses. Contiguousness is 505 * not checked! 506 */ 507 int 508 n2rng_collect_diag_bits(n2rng_t *n2rng, int rngid, 509 n2rng_setup_t *collect_setupp, void *buffer, int numbytes, 510 n2rng_setup_t *exit_setupp, uint64_t exitstate) 511 { 512 int rv; 513 int override_rv = 0; 514 uint64_t hverr; 515 int i; 516 uint64_t tdelta; 517 n2rng_setup_t setupbuffer[2]; 518 n2rng_setup_t *setupcontigp; 519 uint64_t setupphys; 520 int numchunks; 521 boolean_t rnglooping; 522 int busycount = 0; 523 int blockcount = 0; 524 525 if (numbytes % sizeof (uint64_t)) { 526 return (EINVAL); 527 } 528 529 if ((uint64_t)buffer % sizeof (uint64_t) != 0) { 530 return (EINVAL); 531 } 532 533 numchunks = ((numbytes / sizeof (uint64_t)) + RNG_DIAG_CHUNK_SIZE - 1) 534 / RNG_DIAG_CHUNK_SIZE; 535 /* 536 * Use setupbuffer[0] if it is contiguous, otherwise 537 * setupbuffer[1]. 538 */ 539 setupcontigp = &setupbuffer[ 540 CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1]; 541 setupphys = va_to_pa(setupcontigp); 542 543 /* 544 * If a non-null collect_setupp pointer has been provided, 545 * push the specified setup into the hardware. 546 */ 547 if (collect_setupp != NULL) { 548 /* copy the specified state to the aligned buffer */ 549 *setupcontigp = *collect_setupp; 550 rnglooping = B_TRUE; 551 while (rnglooping) { 552 hverr = n2rng_ctl_write(n2rng, rngid, setupphys, 553 CTL_STATE_HEALTHCHECK, 554 n2rng->n_ctl_data->n_watchdog_cycles, &tdelta); 555 rv = n2rng_herr2kerr(hverr); 556 switch (hverr) { 557 case H_EOK: 558 rnglooping = B_FALSE; 559 break; 560 case H_EIO: /* control yanked from us */ 561 case H_ENOACCESS: /* We are not control domain */ 562 return (rv); 563 case H_EWOULDBLOCK: 564 /* Data currently not available, try again */ 565 if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) { 566 DBG1(n2rng, DHEALTH, 567 "n2rng_collect_diag_bits(1) : " 568 "exceeded block count of %d", 569 RNG_MAX_BLOCK_ATTEMPTS); 570 return (rv); 571 } else { 572 cyclesleep(n2rng, tdelta); 573 } 574 break; 575 case H_EBUSY: 576 /* 577 * A control write is already in progress. 578 * Note: This shouldn't happen since 579 * n2rng_ctl_write() waits for the 580 * write to complete. 581 */ 582 if (++busycount > RNG_MAX_BUSY_ATTEMPTS) { 583 DBG1(n2rng, DHEALTH, 584 "n2rng_collect_diag_bits(1): " 585 "exceeded busy count of %d", 586 RNG_MAX_BUSY_ATTEMPTS); 587 return (rv); 588 } else { 589 delay(RNG_RETRY_BUSY_DELAY); 590 } 591 break; 592 default: 593 log_internal_errors(hverr, "hv_rng_ctl_write"); 594 override_rv = rv; 595 goto restore_state; 596 } 597 } /* while (rnglooping) */ 598 } /* if (collect_setupp != NULL) */ 599 600 /* If the caller asks for some bytes, collect the data */ 601 if (numbytes > 0) { 602 for (i = 0; i < numchunks; i++) { 603 size_t thisnumbytes = (i == numchunks - 1) ? 604 numbytes - i * (RNG_DIAG_CHUNK_SIZE * 605 sizeof (uint64_t)) : 606 RNG_DIAG_CHUNK_SIZE * sizeof (uint64_t); 607 608 /* try until we successfully read a word of data */ 609 rnglooping = B_TRUE; 610 busycount = 0; 611 blockcount = 0; 612 while (rnglooping) { 613 hverr = n2rng_data_read_diag(n2rng, rngid, 614 va_to_pa((uint64_t *)buffer + 615 RNG_DIAG_CHUNK_SIZE * i), 616 thisnumbytes, &tdelta); 617 rv = n2rng_herr2kerr(hverr); 618 switch (hverr) { 619 case H_EOK: 620 rnglooping = B_FALSE; 621 break; 622 case H_EIO: 623 case H_ENOACCESS: 624 return (rv); 625 case H_EWOULDBLOCK: 626 /* Data not available, try again */ 627 if (++blockcount > 628 RNG_MAX_BLOCK_ATTEMPTS) { 629 DBG1(n2rng, DHEALTH, 630 "n2rng_collect_diag_bits" 631 "(2): exceeded block count" 632 " of %d", 633 RNG_MAX_BLOCK_ATTEMPTS); 634 return (rv); 635 } else { 636 cyclesleep(n2rng, tdelta); 637 } 638 break; 639 default: 640 log_internal_errors(hverr, 641 "hv_rng_data_read_diag"); 642 override_rv = rv; 643 goto restore_state; 644 } 645 } /* while (!rnglooping) */ 646 } /* for */ 647 } 648 649 restore_state: 650 651 /* restore the preferred configuration and set exit state */ 652 if (exit_setupp != NULL) { 653 654 *setupcontigp = *exit_setupp; 655 rnglooping = B_TRUE; 656 busycount = 0; 657 blockcount = 0; 658 while (rnglooping) { 659 hverr = n2rng_ctl_write(n2rng, rngid, setupphys, 660 exitstate, n2rng->n_ctl_data->n_watchdog_cycles, 661 &tdelta); 662 rv = n2rng_herr2kerr(hverr); 663 switch (hverr) { 664 case H_EOK: 665 case H_EIO: /* control yanked from us */ 666 case H_EINVAL: /* some external error, probably */ 667 case H_ENOACCESS: /* We are not control domain */ 668 rnglooping = B_FALSE; 669 break; 670 case H_EWOULDBLOCK: 671 /* Data currently not available, try again */ 672 if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) { 673 DBG1(n2rng, DHEALTH, 674 "n2rng_collect_diag_bits(3): " 675 "exceeded block count of %d", 676 RNG_MAX_BLOCK_ATTEMPTS); 677 return (rv); 678 } else { 679 cyclesleep(n2rng, tdelta); 680 } 681 break; 682 case H_EBUSY: 683 /* 684 * A control write is already in progress. 685 * Note: This shouldn't happen since 686 * n2rng_ctl_write() waits for the 687 * write to complete. 688 */ 689 if (++busycount > RNG_MAX_BUSY_ATTEMPTS) { 690 DBG1(n2rng, DHEALTH, 691 "n2rng_collect_diag_bits(3): " 692 "exceeded busy count of %d", 693 RNG_MAX_BUSY_ATTEMPTS); 694 return (rv); 695 } else { 696 delay(RNG_RETRY_BUSY_DELAY); 697 } 698 break; 699 default: 700 rnglooping = B_FALSE; 701 log_internal_errors(hverr, "hv_rng_ctl_write"); 702 break; 703 } 704 } /* while */ 705 } /* if */ 706 707 /* 708 * override_rv takes care of the case where we abort becuase 709 * of some error, but still want to restore the peferred state 710 * and return the first error, even if other error occur. 711 */ 712 return (override_rv ? override_rv : rv); 713 } 714 715 int 716 n2rng_getentropy(n2rng_t *n2rng, void *buffer, size_t size) 717 { 718 int i, rv = 0; /* so it works if size is zero */ 719 uint64_t hverr; 720 uint64_t *buffer_w = (uint64_t *)buffer; 721 int num_w = size / sizeof (uint64_t); 722 uint64_t randval; 723 uint64_t randvalphys = va_to_pa(&randval); 724 uint64_t tdelta; 725 int failcount = 0; 726 int blockcount = 0; 727 boolean_t rnglooping; 728 729 for (i = 0; i < num_w; i++) { 730 rnglooping = B_TRUE; 731 while (rnglooping) { 732 hverr = hv_rng_data_read(randvalphys, &tdelta); 733 rv = n2rng_herr2kerr(hverr); 734 switch (hverr) { 735 case H_EOK: 736 buffer_w[i] = randval; 737 failcount = 0; 738 rnglooping = B_FALSE; 739 break; 740 case H_EIO: 741 /* 742 * Either a health check is in progress, or 743 * the watchdog timer has expired while running 744 * hv api version 2.0 or higher with health 745 * checks enabled. 746 */ 747 if (n2rng->n_hvapi_major_version < 2) { 748 /* 749 * A health check is in progress. 750 * Wait RNG_RETRY_HLCHK_USECS and fail 751 * after RNG_MAX_DATA_READ_ATTEMPTS 752 * failures. 753 */ 754 if (++failcount > 755 RNG_MAX_DATA_READ_ATTEMPTS) { 756 DBG2(n2rng, DHEALTH, 757 "n2rng_getentropy: exceeded" 758 "EIO count of %d on cpu %d", 759 RNG_MAX_DATA_READ_ATTEMPTS, 760 CPU->cpu_id); 761 goto exitpoint; 762 } else { 763 delay(drv_usectohz 764 (RNG_RETRY_HLCHK_USECS)); 765 } 766 } else { 767 /* 768 * Just return the error. If a flurry of 769 * random data requests happen to occur 770 * during a health check, there are 771 * multiple levels of defense: 772 * - 2.0 HV provides random data pool 773 * - FIPS algorithm tolerates failures 774 * - Software failover 775 * - Automatic configuration retries 776 * - Hardware failover on some systems 777 */ 778 goto exitpoint; 779 } 780 break; 781 case H_EWOULDBLOCK: 782 /* Data currently not available, try again */ 783 if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) { 784 DBG1(n2rng, DHEALTH, 785 "n2rng_getentropy: " 786 "exceeded block count of %d", 787 RNG_MAX_BLOCK_ATTEMPTS); 788 goto exitpoint; 789 } else { 790 cyclesleep(n2rng, tdelta); 791 } 792 break; 793 default: 794 log_internal_errors(hverr, "hv_rng_data_read"); 795 goto exitpoint; 796 } 797 } /* while */ 798 } /* for */ 799 800 exitpoint: 801 return (rv); 802 } 803 804 uint64_t 805 n2rng_ctl_read(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa, uint64_t *state, 806 uint64_t *tdelta, uint64_t *wdelta) 807 { 808 uint64_t rv; 809 uint64_t wstatus; 810 811 /* Call correct hv function based on api version */ 812 if (n2rng->n_hvapi_major_version == 2) { 813 rv = hv_rng_ctl_read_v2(ctlregs_pa, (uint64_t)rngid, state, 814 tdelta, wdelta, &wstatus); 815 if (rv == 0) { 816 rv = wstatus; 817 } 818 } else { 819 rv = hv_rng_ctl_read(ctlregs_pa, state, tdelta); 820 *wdelta = 0; 821 } 822 823 return (rv); 824 } 825 826 uint64_t 827 n2rng_ctl_wait(n2rng_t *n2rng, int rngid) 828 { 829 uint64_t state; 830 uint64_t tdelta; 831 uint64_t wdelta; 832 uint64_t wstatus; 833 boolean_t rnglooping = B_TRUE; 834 uint64_t rv; 835 n2rng_setup_t setupbuffer[2]; 836 n2rng_setup_t *setupcontigp; 837 uint64_t setupphys; 838 int busycount = 0; 839 int blockcount = 0; 840 841 /* 842 * Use setupbuffer[0] if it is contiguous, otherwise 843 * setupbuffer[1]. 844 */ 845 setupcontigp = &setupbuffer[ 846 CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1]; 847 setupphys = va_to_pa(setupcontigp); 848 849 while (rnglooping) { 850 rv = hv_rng_ctl_read_v2(setupphys, (uint64_t)rngid, &state, 851 &tdelta, &wdelta, &wstatus); 852 switch (rv) { 853 case H_EOK: 854 rv = wstatus; 855 rnglooping = B_FALSE; 856 break; 857 case H_EWOULDBLOCK: 858 /* Data currently not available, try again */ 859 if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) { 860 DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: " 861 "exceeded block count of %d", 862 RNG_MAX_BLOCK_ATTEMPTS); 863 return (rv); 864 } else { 865 cyclesleep(n2rng, tdelta); 866 } 867 break; 868 case H_EBUSY: 869 /* Control write still pending, try again */ 870 if (++busycount > RNG_MAX_BUSY_ATTEMPTS) { 871 DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: " 872 "exceeded busy count of %d", 873 RNG_MAX_BUSY_ATTEMPTS); 874 return (rv); 875 } else { 876 delay(RNG_RETRY_BUSY_DELAY); 877 } 878 break; 879 default: 880 log_internal_errors(rv, "n2rng_ctl_wait"); 881 rnglooping = B_FALSE; 882 } 883 } /* while (rnglooping) */ 884 885 return (rv); 886 } 887 888 uint64_t 889 n2rng_ctl_write(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa, 890 uint64_t newstate, uint64_t wtimeout, uint64_t *tdelta) 891 { 892 uint64_t rv; 893 894 /* Call correct hv function based on api version */ 895 if (n2rng->n_hvapi_major_version == 2) { 896 rv = hv_rng_ctl_write_v2(ctlregs_pa, newstate, wtimeout, 897 (uint64_t)rngid); 898 if (rv == H_EOK) { 899 /* Wait for control registers to be written */ 900 rv = n2rng_ctl_wait(n2rng, rngid); 901 } 902 *tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES; 903 } else { 904 rv = hv_rng_ctl_write(ctlregs_pa, newstate, wtimeout, tdelta); 905 } 906 907 return (rv); 908 } 909 910 uint64_t 911 n2rng_data_read_diag(n2rng_t *n2rng, int rngid, uint64_t data_pa, 912 size_t datalen, uint64_t *tdelta) 913 { 914 uint64_t rv; 915 916 /* Call correct hv function based on api version */ 917 if (n2rng->n_hvapi_major_version == 2) { 918 rv = hv_rng_data_read_diag_v2(data_pa, datalen, 919 (uint64_t)rngid, tdelta); 920 if (*tdelta == 0) { 921 *tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES; 922 } 923 } else { 924 rv = hv_rng_data_read_diag(data_pa, datalen, tdelta); 925 } 926 927 return (rv); 928 } 929 930 uint64_t 931 n2rng_check_ctl_access(n2rng_t *n2rng) 932 { 933 uint64_t rv; 934 uint64_t unused_64; 935 936 /* Call correct hv function based on api version */ 937 if (n2rng->n_hvapi_major_version == 2) { 938 /* 939 * Attempt to read control registers with invalid ID and data 940 * just to see if we get an access error 941 */ 942 rv = hv_rng_ctl_read_v2(NULL, N2RNG_INVALID_ID, 943 &unused_64, &unused_64, &unused_64, &unused_64); 944 } else { 945 rv = hv_rng_get_diag_control(); 946 } 947 948 return (rv); 949 } 950 951 /* 952 * n2rng_config_retry() 953 * 954 * Schedule a timed call to n2rng_config() if one is not already pending 955 */ 956 void 957 n2rng_config_retry(n2rng_t *n2rng, clock_t seconds) 958 { 959 mutex_enter(&n2rng->n_lock); 960 /* Check if a config retry is already pending */ 961 if (n2rng->n_timeout_id) { 962 DBG1(n2rng, DCFG, "n2rng_config_retry: retry pending " 963 "id = %x", n2rng->n_timeout_id); 964 } else { 965 n2rng->n_timeout_id = timeout(n2rng_config_task, 966 (void *)n2rng, drv_usectohz(seconds * SECOND)); 967 DBG2(n2rng, DCFG, "n2rng_config_retry: retry scheduled in " 968 "%d seconds, id = %x", seconds, n2rng->n_timeout_id); 969 } 970 mutex_exit(&n2rng->n_lock); 971 } 972 973 static uint64_t 974 sticks_per_usec(void) 975 { 976 uint64_t starttick = gettick(); 977 hrtime_t starttime = gethrtime(); 978 uint64_t endtick; 979 hrtime_t endtime; 980 981 delay(2); 982 983 endtick = gettick(); 984 endtime = gethrtime(); 985 986 return ((1000 * (endtick - starttick)) / (endtime - starttime)); 987 } 988 989 static int 990 n2rng_init_ctl(n2rng_t *n2rng) 991 { 992 int rv; 993 int hverr; 994 rng_entry_t *rng; 995 int rngid; 996 int blockcount = 0; 997 998 n2rng->n_ctl_data = NULL; 999 1000 /* Attempt to gain diagnostic control */ 1001 do { 1002 hverr = n2rng_check_ctl_access(n2rng); 1003 rv = n2rng_herr2kerr(hverr); 1004 if ((hverr == H_EWOULDBLOCK) && 1005 (++blockcount > RNG_MAX_BUSY_ATTEMPTS)) { 1006 DBG1(n2rng, DHEALTH, "n2rng_int_ctl: exceeded busy " 1007 "count of %d", RNG_MAX_BUSY_ATTEMPTS); 1008 return (rv); 1009 } else { 1010 delay(RNG_RETRY_BUSY_DELAY); 1011 } 1012 } while (hverr == H_EWOULDBLOCK); 1013 1014 /* 1015 * If attempt fails with EPERM, the driver is not running in the 1016 * control domain 1017 */ 1018 if (rv == EPERM) { 1019 DBG0(n2rng, DATTACH, 1020 "n2rng_init_ctl: Running in guest domain"); 1021 return (DDI_SUCCESS); 1022 } 1023 1024 /* Allocate control stucture only used in control domain */ 1025 n2rng->n_ctl_data = kmem_alloc(sizeof (rng_ctl_data_t), KM_SLEEP); 1026 n2rng->n_ctl_data->n_num_rngs_online = 0; 1027 1028 /* 1029 * If running with an API version less than 2.0 default to one rng. 1030 * Otherwise get number of rngs from device properties. 1031 */ 1032 if (n2rng->n_hvapi_major_version < 2) { 1033 n2rng->n_ctl_data->n_num_rngs = 1; 1034 } else { 1035 n2rng->n_ctl_data->n_num_rngs = 1036 ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip, 1037 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, 1038 N2RNG_PROP_NUM_UNITS, 0); 1039 if (n2rng->n_ctl_data->n_num_rngs == 0) { 1040 cmn_err(CE_WARN, "n2rng: %s property not found", 1041 N2RNG_PROP_NUM_UNITS); 1042 return (DDI_FAILURE); 1043 } 1044 } 1045 1046 /* Allocate space for all rng entries */ 1047 n2rng->n_ctl_data->n_rngs = 1048 kmem_zalloc(n2rng->n_ctl_data->n_num_rngs * 1049 sizeof (rng_entry_t), KM_SLEEP); 1050 1051 /* Get accumulate cycles from .conf file. */ 1052 n2rng->n_ctl_data->n_accumulate_cycles = 1053 ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip, 1054 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "acc_cycles", 1055 RNG_DEFAULT_ACCUMULATE_CYCLES); 1056 1057 /* Get health check frequency from .conf file */ 1058 n2rng->n_ctl_data->n_hc_secs = ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip, 1059 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "hc_seconds", 1060 RNG_DEFAULT_HC_SECS); 1061 1062 /* API versions prior to 2.0 do not support health checks */ 1063 if ((n2rng->n_hvapi_major_version < 2) && 1064 (n2rng->n_ctl_data->n_hc_secs > 0)) { 1065 cmn_err(CE_WARN, "n2rng: Hyperviser api " 1066 "version %d.%d does not support health checks", 1067 n2rng->n_hvapi_major_version, 1068 n2rng->n_hvapi_minor_version); 1069 n2rng->n_ctl_data->n_hc_secs = 0; 1070 } 1071 1072 /* Calculate watchdog timeout value */ 1073 if (n2rng->n_ctl_data->n_hc_secs <= 0) { 1074 n2rng->n_ctl_data->n_watchdog_cycles = 0; 1075 } else { 1076 n2rng->n_ctl_data->n_watchdog_cycles = 1077 ((uint64_t)(RNG_EXTRA_WATCHDOG_SECS) + 1078 n2rng->n_ctl_data->n_hc_secs) * 1079 n2rng->n_sticks_per_usec * 1000000; 1080 } 1081 1082 /* 1083 * Set some plausible state into the preferred configuration. 1084 * The intent is that the health check will immediately overwrite it. 1085 */ 1086 for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs; rngid++) { 1087 1088 rng = &n2rng->n_ctl_data->n_rngs[rngid]; 1089 1090 rng->n_preferred_config.ctlwds[0].word = 0; 1091 rng->n_preferred_config.ctlwds[0].fields.rnc_anlg_sel = 1092 N2RNG_NOANALOGOUT; 1093 rng->n_preferred_config.ctlwds[0].fields.rnc_cnt = 1094 RNG_DEFAULT_ACCUMULATE_CYCLES; 1095 rng->n_preferred_config.ctlwds[0].fields.rnc_mode = 1096 RNG_MODE_NORMAL; 1097 rng->n_preferred_config.ctlwds[1].word = 1098 rng->n_preferred_config.ctlwds[0].word; 1099 rng->n_preferred_config.ctlwds[2].word = 1100 rng->n_preferred_config.ctlwds[0].word; 1101 rng->n_preferred_config.ctlwds[3].word = 1102 rng->n_preferred_config.ctlwds[0].word; 1103 rng->n_preferred_config.ctlwds[0].fields.rnc_vcoctl = 1; 1104 rng->n_preferred_config.ctlwds[0].fields.rnc_selbits = 1; 1105 rng->n_preferred_config.ctlwds[1].fields.rnc_vcoctl = 2; 1106 rng->n_preferred_config.ctlwds[1].fields.rnc_selbits = 2; 1107 rng->n_preferred_config.ctlwds[2].fields.rnc_vcoctl = 3; 1108 rng->n_preferred_config.ctlwds[2].fields.rnc_selbits = 4; 1109 rng->n_preferred_config.ctlwds[3].fields.rnc_vcoctl = 0; 1110 rng->n_preferred_config.ctlwds[3].fields.rnc_selbits = 7; 1111 } 1112 1113 n2rng_setcontrol(n2rng); 1114 DBG2(n2rng, DATTACH, 1115 "n2rng_init_ctl: Running in control domain with %d rng device%s", 1116 n2rng->n_ctl_data->n_num_rngs, 1117 (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s"); 1118 DBG2(n2rng, DCFG, 1119 "n2rng_init_ctl: n_sticks_per_usec = %ld, n_hc_secs = %d", 1120 n2rng->n_sticks_per_usec, 1121 n2rng->n_ctl_data->n_hc_secs); 1122 DBG2(n2rng, DCFG, 1123 "n2rng_init_ctl: n_watchdog_cycles = %ld, " 1124 "n_accumulate_cycles = %ld", n2rng->n_ctl_data->n_watchdog_cycles, 1125 n2rng->n_ctl_data->n_accumulate_cycles); 1126 1127 return (DDI_SUCCESS); 1128 } 1129 1130 static void 1131 n2rng_uninit_ctl(n2rng_t *n2rng) 1132 { 1133 if (n2rng->n_ctl_data) { 1134 if (n2rng->n_ctl_data->n_num_rngs) { 1135 kmem_free(n2rng->n_ctl_data->n_rngs, 1136 n2rng->n_ctl_data->n_num_rngs * 1137 sizeof (rng_entry_t)); 1138 n2rng->n_ctl_data->n_rngs = NULL; 1139 n2rng->n_ctl_data->n_num_rngs = 0; 1140 } 1141 kmem_free(n2rng->n_ctl_data, sizeof (rng_ctl_data_t)); 1142 n2rng->n_ctl_data = NULL; 1143 } 1144 } 1145 1146 1147 /* 1148 * n2rng_config_test() 1149 * 1150 * Attempt read random data to see if the rng is configured. 1151 */ 1152 int 1153 n2rng_config_test(n2rng_t *n2rng) 1154 { 1155 int rv = 0; 1156 uint64_t hverr; 1157 uint64_t randval = 0; 1158 uint64_t randvalphys = va_to_pa(&randval); 1159 uint64_t tdelta; 1160 int failcount = 0; 1161 int blockcount = 0; 1162 boolean_t rnglooping = B_TRUE; 1163 1164 while (rnglooping) { 1165 hverr = hv_rng_data_read(randvalphys, &tdelta); 1166 rv = n2rng_herr2kerr(hverr); 1167 switch (hverr) { 1168 case H_EOK: 1169 failcount = 0; 1170 rnglooping = B_FALSE; 1171 break; 1172 case H_EIO: 1173 /* 1174 * A health check is in progress. 1175 * Wait RNG_RETRY_HLCHK_USECS and fail 1176 * after RNG_MAX_DATA_READ_ATTEMPTS 1177 * failures. 1178 */ 1179 if (++failcount > RNG_MAX_DATA_READ_ATTEMPTS) { 1180 goto exitpoint; 1181 } else { 1182 delay(drv_usectohz(RNG_RETRY_HLCHK_USECS)); 1183 } 1184 break; 1185 case H_EWOULDBLOCK: 1186 /* Data currently not available, try again */ 1187 if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) { 1188 DBG1(n2rng, DHEALTH, "n2rng_config_test: " 1189 "exceeded block count of %d", 1190 RNG_MAX_BLOCK_ATTEMPTS); 1191 goto exitpoint; 1192 } else { 1193 cyclesleep(n2rng, tdelta); 1194 } 1195 break; 1196 case H_ENOACCESS: 1197 /* An rng error has occured during health check */ 1198 goto exitpoint; 1199 default: 1200 log_internal_errors(hverr, "hv_rng_data_read"); 1201 goto exitpoint; 1202 } 1203 } /* while */ 1204 1205 exitpoint: 1206 return (rv); 1207 } 1208 1209 /* 1210 * n2rng_config() 1211 * 1212 * Run health check on the RNG hardware 1213 * Configure the RNG hardware 1214 * Register with crypto framework 1215 */ 1216 static int 1217 n2rng_config(n2rng_t *n2rng) 1218 { 1219 int rv; 1220 rng_entry_t *rng; 1221 int rngid; 1222 1223 /* 1224 * Run health checks and configure rngs if running in control domain, 1225 * otherwise just check if at least one rng is available. 1226 */ 1227 if (n2rng_iscontrol(n2rng)) { 1228 1229 for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs; 1230 rngid++) { 1231 1232 rng = &n2rng->n_ctl_data->n_rngs[rngid]; 1233 1234 /* Only test rngs that have not already failed */ 1235 if (rng->n_rng_state == CTL_STATE_ERROR) { 1236 continue; 1237 } 1238 1239 if ((n2rng->n_binding == N2RNG_CPU_VF) && 1240 (n2rng->n_hvapi_major_version < 2)) { 1241 /* 1242 * Since api versions prior to 2.0 do not 1243 * support multiple rngs, bind to the current 1244 * processor for the entire health check 1245 * process. 1246 */ 1247 thread_affinity_set(curthread, CPU_CURRENT); 1248 DBG1(n2rng, DCFG, "n2rng_config: " 1249 "Configuring single rng from cpu %d", 1250 CPU->cpu_id); 1251 rv = n2rng_do_health_check(n2rng, rngid); 1252 thread_affinity_clear(curthread); 1253 } else { 1254 rv = n2rng_do_health_check(n2rng, rngid); 1255 } 1256 1257 switch (rv) { 1258 case 0: 1259 /* 1260 * Successful, increment online count if 1261 * necessary 1262 */ 1263 DBG1(n2rng, DCFG, "n2rng_config: rng(%d) " 1264 "passed health checks", rngid); 1265 if (rng->n_rng_state != CTL_STATE_CONFIGURED) { 1266 rng->n_rng_state = 1267 CTL_STATE_CONFIGURED; 1268 n2rng->n_ctl_data->n_num_rngs_online++; 1269 } 1270 break; 1271 default: 1272 /* 1273 * Health checks failed, decrement online 1274 * count if necessary 1275 */ 1276 cmn_err(CE_WARN, "n2rng: rng(%d) " 1277 "failed health checks", rngid); 1278 if (rng->n_rng_state == CTL_STATE_CONFIGURED) { 1279 n2rng->n_ctl_data->n_num_rngs_online--; 1280 } 1281 rng->n_rng_state = CTL_STATE_ERROR; 1282 break; 1283 } 1284 } 1285 DBG2(n2rng, DCFG, "n2rng_config: %d rng%s online", 1286 n2rng->n_ctl_data->n_num_rngs_online, 1287 (n2rng->n_ctl_data->n_num_rngs_online == 1) ? "" : "s"); 1288 1289 /* Check if all rngs have failed */ 1290 if (n2rng->n_ctl_data->n_num_rngs_online == 0) { 1291 cmn_err(CE_WARN, "n2rng: %d RNG device%s failed", 1292 n2rng->n_ctl_data->n_num_rngs, 1293 (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s"); 1294 goto errorexit; 1295 } else { 1296 n2rng_setconfigured(n2rng); 1297 } 1298 } else { 1299 /* Running in guest domain, just check if rng is configured */ 1300 rv = n2rng_config_test(n2rng); 1301 switch (rv) { 1302 case 0: 1303 n2rng_setconfigured(n2rng); 1304 break; 1305 case EIO: 1306 /* Don't set configured to force a retry */ 1307 break; 1308 default: 1309 goto errorexit; 1310 } 1311 } 1312 1313 /* 1314 * Initialize FIPS state and register with KCF if we have at least one 1315 * RNG configured. Otherwise schedule a retry if all rngs have not 1316 * failed. 1317 */ 1318 if (n2rng_isconfigured(n2rng)) { 1319 1320 if (n2rng_init(n2rng) != DDI_SUCCESS) { 1321 cmn_err(CE_WARN, "n2rng: unable to register with KCF"); 1322 goto errorexit; 1323 } 1324 1325 /* 1326 * Schedule a retry if running in the control domain and a 1327 * health check time has been specified. 1328 */ 1329 if (n2rng_iscontrol(n2rng) && 1330 (n2rng->n_ctl_data->n_hc_secs > 0)) { 1331 n2rng_config_retry(n2rng, 1332 n2rng->n_ctl_data->n_hc_secs); 1333 } 1334 } else if (!n2rng_isfailed(n2rng)) { 1335 /* Schedule a retry if one is not already pending */ 1336 n2rng_config_retry(n2rng, RNG_CFG_RETRY_SECS); 1337 } 1338 return (DDI_SUCCESS); 1339 1340 errorexit: 1341 /* Unregister from kCF if we are registered */ 1342 (void) n2rng_unregister_provider(n2rng); 1343 n2rng_setfailed(n2rng); 1344 cmn_err(CE_WARN, "n2rng: hardware failure detected"); 1345 return (DDI_FAILURE); 1346 } 1347 1348 /* 1349 * n2rng_config_task() 1350 * 1351 * Call n2rng_config() from the task queue or after a timeout, ignore result. 1352 */ 1353 static void 1354 n2rng_config_task(void *targ) 1355 { 1356 n2rng_t *n2rng = (n2rng_t *)targ; 1357 1358 mutex_enter(&n2rng->n_lock); 1359 n2rng->n_timeout_id = 0; 1360 mutex_exit(&n2rng->n_lock); 1361 (void) n2rng_config(n2rng); 1362 } 1363