1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * Public interface to routines implemented by CPU modules 31 */ 32 33 #include <sys/types.h> 34 #include <sys/atomic.h> 35 #include <sys/x86_archext.h> 36 #include <sys/cpu_module_impl.h> 37 #include <sys/cpu_module_ms.h> 38 #include <sys/fm/util.h> 39 #include <sys/reboot.h> 40 #include <sys/modctl.h> 41 #include <sys/param.h> 42 #include <sys/cmn_err.h> 43 #include <sys/systm.h> 44 #include <sys/fm/protocol.h> 45 #include <sys/pcb.h> 46 #include <sys/ontrap.h> 47 #include <sys/psw.h> 48 #include <sys/privregs.h> 49 50 /* 51 * Set to force cmi_init to fail. 52 */ 53 int cmi_no_init = 0; 54 55 /* 56 * Set to avoid MCA initialization. 57 */ 58 int cmi_no_mca_init = 0; 59 60 /* 61 * If cleared for debugging we will not attempt to load a model-specific 62 * cpu module but will load the generic cpu module instead. 63 */ 64 int cmi_force_generic = 0; 65 66 /* 67 * If cleared for debugging, we will suppress panicking on fatal hardware 68 * errors. This should *only* be used for debugging; it use can and will 69 * cause data corruption if actual hardware errors are detected by the system. 70 */ 71 int cmi_panic_on_uncorrectable_error = 1; 72 73 /* 74 * Subdirectory (relative to the module search path) in which we will 75 * look for cpu modules. 76 */ 77 #define CPUMOD_SUBDIR "cpu" 78 79 /* 80 * CPU modules have a filenames such as "cpu.AuthenticAMD.15" and 81 * "cpu.generic" - the "cpu" prefix is specified by the following. 82 */ 83 #define CPUMOD_PREFIX "cpu" 84 85 /* 86 * Structure used to keep track of cpu modules we have loaded and their ops 87 */ 88 typedef struct cmi { 89 struct cmi *cmi_next; 90 struct cmi *cmi_prev; 91 const cmi_ops_t *cmi_ops; 92 struct modctl *cmi_modp; 93 uint_t cmi_refcnt; 94 } cmi_t; 95 96 static cmi_t *cmi_list; 97 static kmutex_t cmi_load_lock; 98 99 /* 100 * Functions we need from cmi_hw.c that are not part of the cpu_module.h 101 * interface. 102 */ 103 extern cmi_hdl_t cmi_hdl_create(enum cmi_hdl_class, uint_t, uint_t, uint_t); 104 extern void cmi_hdl_setcmi(cmi_hdl_t, void *, void *); 105 extern void *cmi_hdl_getcmi(cmi_hdl_t); 106 extern void cmi_hdl_setmc(cmi_hdl_t, const struct cmi_mc_ops *, void *); 107 108 #define HDL2CMI(hdl) cmi_hdl_getcmi(hdl) 109 110 #define CMI_OPS(cmi) (cmi)->cmi_ops 111 #define CMI_OP_PRESENT(cmi, op) ((cmi) && CMI_OPS(cmi)->op != NULL) 112 113 #define CMI_MATCH_VENDOR 0 /* Just match on vendor */ 114 #define CMI_MATCH_FAMILY 1 /* Match down to family */ 115 #define CMI_MATCH_MODEL 2 /* Match down to model */ 116 #define CMI_MATCH_STEPPING 3 /* Match down to stepping */ 117 118 static void 119 cmi_link(cmi_t *cmi) 120 { 121 ASSERT(MUTEX_HELD(&cmi_load_lock)); 122 123 cmi->cmi_prev = NULL; 124 cmi->cmi_next = cmi_list; 125 if (cmi_list != NULL) 126 cmi_list->cmi_prev = cmi; 127 cmi_list = cmi; 128 } 129 130 static void 131 cmi_unlink(cmi_t *cmi) 132 { 133 ASSERT(MUTEX_HELD(&cmi_load_lock)); 134 ASSERT(cmi->cmi_refcnt == 0); 135 136 if (cmi->cmi_prev != NULL) 137 cmi->cmi_prev = cmi->cmi_next; 138 139 if (cmi->cmi_next != NULL) 140 cmi->cmi_next->cmi_prev = cmi->cmi_prev; 141 142 if (cmi_list == cmi) 143 cmi_list = cmi->cmi_next; 144 } 145 146 /* 147 * Hold the module in memory. We call to CPU modules without using the 148 * stubs mechanism, so these modules must be manually held in memory. 149 * The mod_ref acts as if another loaded module has a dependency on us. 150 */ 151 static void 152 cmi_hold(cmi_t *cmi) 153 { 154 ASSERT(MUTEX_HELD(&cmi_load_lock)); 155 156 mutex_enter(&mod_lock); 157 cmi->cmi_modp->mod_ref++; 158 mutex_exit(&mod_lock); 159 cmi->cmi_refcnt++; 160 } 161 162 static void 163 cmi_rele(cmi_t *cmi) 164 { 165 ASSERT(MUTEX_HELD(&cmi_load_lock)); 166 167 mutex_enter(&mod_lock); 168 cmi->cmi_modp->mod_ref--; 169 mutex_exit(&mod_lock); 170 171 if (--cmi->cmi_refcnt == 0) { 172 cmi_unlink(cmi); 173 kmem_free(cmi, sizeof (cmi_t)); 174 } 175 } 176 177 static cmi_ops_t * 178 cmi_getops(modctl_t *modp) 179 { 180 cmi_ops_t *ops; 181 182 if ((ops = (cmi_ops_t *)modlookup_by_modctl(modp, "_cmi_ops")) == 183 NULL) { 184 cmn_err(CE_WARN, "cpu module '%s' is invalid: no _cmi_ops " 185 "found", modp->mod_modname); 186 return (NULL); 187 } 188 189 if (ops->cmi_init == NULL) { 190 cmn_err(CE_WARN, "cpu module '%s' is invalid: no cmi_init " 191 "entry point", modp->mod_modname); 192 return (NULL); 193 } 194 195 return (ops); 196 } 197 198 static cmi_t * 199 cmi_load_modctl(modctl_t *modp) 200 { 201 cmi_ops_t *ops; 202 uintptr_t ver; 203 cmi_t *cmi; 204 cmi_api_ver_t apiver; 205 206 ASSERT(MUTEX_HELD(&cmi_load_lock)); 207 208 for (cmi = cmi_list; cmi != NULL; cmi = cmi->cmi_next) { 209 if (cmi->cmi_modp == modp) 210 return (cmi); 211 } 212 213 if ((ver = modlookup_by_modctl(modp, "_cmi_api_version")) == NULL) { 214 /* 215 * Apparently a cpu module before versioning was introduced - 216 * we call this version 0. 217 */ 218 apiver = CMI_API_VERSION_0; 219 } else { 220 apiver = *((cmi_api_ver_t *)ver); 221 if (!CMI_API_VERSION_CHKMAGIC(apiver)) { 222 cmn_err(CE_WARN, "cpu module '%s' is invalid: " 223 "_cmi_api_version 0x%x has bad magic", 224 modp->mod_modname, apiver); 225 return (NULL); 226 } 227 } 228 229 if (apiver != CMI_API_VERSION) { 230 cmn_err(CE_WARN, "cpu module '%s' has API version %d, " 231 "kernel requires API version %d", modp->mod_modname, 232 CMI_API_VERSION_TOPRINT(apiver), 233 CMI_API_VERSION_TOPRINT(CMI_API_VERSION)); 234 return (NULL); 235 } 236 237 if ((ops = cmi_getops(modp)) == NULL) 238 return (NULL); 239 240 cmi = kmem_zalloc(sizeof (*cmi), KM_SLEEP); 241 cmi->cmi_ops = ops; 242 cmi->cmi_modp = modp; 243 244 cmi_link(cmi); 245 246 return (cmi); 247 } 248 249 static int 250 cmi_cpu_match(cmi_hdl_t hdl1, cmi_hdl_t hdl2, int match) 251 { 252 if (match >= CMI_MATCH_VENDOR && 253 cmi_hdl_vendor(hdl1) != cmi_hdl_vendor(hdl2)) 254 return (0); 255 256 if (match >= CMI_MATCH_FAMILY && 257 cmi_hdl_family(hdl1) != cmi_hdl_family(hdl2)) 258 return (0); 259 260 if (match >= CMI_MATCH_MODEL && 261 cmi_hdl_model(hdl1) != cmi_hdl_model(hdl2)) 262 return (0); 263 264 if (match >= CMI_MATCH_STEPPING && 265 cmi_hdl_stepping(hdl1) != cmi_hdl_stepping(hdl2)) 266 return (0); 267 268 return (1); 269 } 270 271 static int 272 cmi_search_list_cb(cmi_hdl_t whdl, void *arg1, void *arg2, void *arg3) 273 { 274 cmi_hdl_t thdl = (cmi_hdl_t)arg1; 275 int match = *((int *)arg2); 276 cmi_hdl_t *rsltp = (cmi_hdl_t *)arg3; 277 278 if (cmi_cpu_match(thdl, whdl, match)) { 279 cmi_hdl_hold(whdl); /* short-term hold */ 280 *rsltp = whdl; 281 return (CMI_HDL_WALK_DONE); 282 } else { 283 return (CMI_HDL_WALK_NEXT); 284 } 285 } 286 287 static cmi_t * 288 cmi_search_list(cmi_hdl_t hdl, int match) 289 { 290 cmi_hdl_t dhdl = NULL; 291 cmi_t *cmi = NULL; 292 293 ASSERT(MUTEX_HELD(&cmi_load_lock)); 294 295 cmi_hdl_walk(cmi_search_list_cb, (void *)hdl, (void *)&match, &dhdl); 296 if (dhdl) { 297 cmi = HDL2CMI(dhdl); 298 cmi_hdl_rele(dhdl); /* held in cmi_search_list_cb */ 299 } 300 301 return (cmi); 302 } 303 304 static cmi_t * 305 cmi_load_module(cmi_hdl_t hdl, int match, int *chosenp) 306 { 307 modctl_t *modp; 308 cmi_t *cmi; 309 int modid; 310 uint_t s[3]; 311 312 ASSERT(MUTEX_HELD(&cmi_load_lock)); 313 ASSERT(match == CMI_MATCH_STEPPING || match == CMI_MATCH_MODEL || 314 match == CMI_MATCH_FAMILY || match == CMI_MATCH_VENDOR); 315 316 /* 317 * Have we already loaded a module for a cpu with the same 318 * vendor/family/model/stepping? 319 */ 320 if ((cmi = cmi_search_list(hdl, match)) != NULL) { 321 cmi_hold(cmi); 322 return (cmi); 323 } 324 325 s[0] = cmi_hdl_family(hdl); 326 s[1] = cmi_hdl_model(hdl); 327 s[2] = cmi_hdl_stepping(hdl); 328 modid = modload_qualified(CPUMOD_SUBDIR, CPUMOD_PREFIX, 329 cmi_hdl_vendorstr(hdl), ".", s, match, chosenp); 330 331 if (modid == -1) 332 return (NULL); 333 334 modp = mod_hold_by_id(modid); 335 cmi = cmi_load_modctl(modp); 336 if (cmi) 337 cmi_hold(cmi); 338 mod_release_mod(modp); 339 340 return (cmi); 341 } 342 343 /* 344 * Try to load a cpu module with specific support for this chip type. 345 */ 346 static cmi_t * 347 cmi_load_specific(cmi_hdl_t hdl, void **datap) 348 { 349 cmi_t *cmi; 350 int err; 351 int i; 352 353 ASSERT(MUTEX_HELD(&cmi_load_lock)); 354 355 for (i = CMI_MATCH_STEPPING; i >= CMI_MATCH_VENDOR; i--) { 356 int suffixlevel; 357 358 if ((cmi = cmi_load_module(hdl, i, &suffixlevel)) == NULL) 359 return (NULL); 360 361 /* 362 * A module has loaded and has a _cmi_ops structure, and the 363 * module has been held for this instance. Call its cmi_init 364 * entry point - we expect success (0) or ENOTSUP. 365 */ 366 if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) == 0) { 367 if (boothowto & RB_VERBOSE) { 368 printf("initialized cpu module '%s' on " 369 "chip %d core %d strand %d\n", 370 cmi->cmi_modp->mod_modname, 371 cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl), 372 cmi_hdl_strandid(hdl)); 373 } 374 return (cmi); 375 } else if (err != ENOTSUP) { 376 cmn_err(CE_WARN, "failed to init cpu module '%s' on " 377 "chip %d core %d strand %d: err=%d\n", 378 cmi->cmi_modp->mod_modname, 379 cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl), 380 cmi_hdl_strandid(hdl), err); 381 } 382 383 /* 384 * The module failed or declined to init, so release 385 * it and update i to be equal to the number 386 * of suffices actually used in the last module path. 387 */ 388 cmi_rele(cmi); 389 i = suffixlevel; 390 } 391 392 return (NULL); 393 } 394 395 /* 396 * Load the generic IA32 MCA cpu module, which may still supplement 397 * itself with model-specific support through cpu model-specific modules. 398 */ 399 static cmi_t * 400 cmi_load_generic(cmi_hdl_t hdl, void **datap) 401 { 402 modctl_t *modp; 403 cmi_t *cmi; 404 int modid; 405 int err; 406 407 ASSERT(MUTEX_HELD(&cmi_load_lock)); 408 409 if ((modid = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic")) == -1) 410 return (NULL); 411 412 modp = mod_hold_by_id(modid); 413 cmi = cmi_load_modctl(modp); 414 if (cmi) 415 cmi_hold(cmi); 416 mod_release_mod(modp); 417 418 if (cmi == NULL) 419 return (NULL); 420 421 if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) != 0) { 422 if (err != ENOTSUP) 423 cmn_err(CE_WARN, CPUMOD_PREFIX ".generic failed to " 424 "init: err=%d", err); 425 cmi_rele(cmi); 426 return (NULL); 427 } 428 429 return (cmi); 430 } 431 432 cmi_hdl_t 433 cmi_init(enum cmi_hdl_class class, uint_t chipid, uint_t coreid, 434 uint_t strandid) 435 { 436 cmi_t *cmi = NULL; 437 cmi_hdl_t hdl; 438 void *data; 439 440 if (cmi_no_init) { 441 cmi_no_mca_init = 1; 442 return (NULL); 443 } 444 445 mutex_enter(&cmi_load_lock); 446 447 if ((hdl = cmi_hdl_create(class, chipid, coreid, strandid)) == NULL) { 448 mutex_exit(&cmi_load_lock); 449 cmn_err(CE_WARN, "There will be no MCA support on chip %d " 450 "core %d strand %d (cmi_hdl_create returned NULL)\n", 451 chipid, coreid, strandid); 452 return (NULL); 453 } 454 455 if (!cmi_force_generic) 456 cmi = cmi_load_specific(hdl, &data); 457 458 if (cmi == NULL && (cmi = cmi_load_generic(hdl, &data)) == NULL) { 459 cmn_err(CE_WARN, "There will be no MCA support on chip %d " 460 "core %d strand %d\n", chipid, coreid, strandid); 461 cmi_hdl_rele(hdl); 462 mutex_exit(&cmi_load_lock); 463 return (NULL); 464 } 465 466 cmi_hdl_setcmi(hdl, cmi, data); 467 468 cms_init(hdl); 469 470 mutex_exit(&cmi_load_lock); 471 472 return (hdl); 473 } 474 475 /* 476 * cmi_fini is not called at the moment. It is intended to be called 477 * on DR deconfigure of a cpu resource. It should not be called at 478 * simple offline of a cpu. 479 */ 480 void 481 cmi_fini(cmi_hdl_t hdl) 482 { 483 cmi_t *cmi = HDL2CMI(hdl); 484 485 if (cms_present(hdl)) 486 cms_fini(hdl); 487 488 if (CMI_OP_PRESENT(cmi, cmi_fini)) 489 CMI_OPS(cmi)->cmi_fini(hdl); 490 491 cmi_hdl_rele(hdl); /* release hold obtained in cmi_hdl_create */ 492 } 493 494 /* 495 * cmi_post_startup is called from post_startup for the boot cpu only. 496 */ 497 void 498 cmi_post_startup(void) 499 { 500 cmi_hdl_t hdl; 501 cmi_t *cmi; 502 503 if (cmi_no_mca_init != 0 || 504 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 505 return; 506 507 cmi = HDL2CMI(hdl); 508 509 if (CMI_OP_PRESENT(cmi, cmi_post_startup)) 510 CMI_OPS(cmi)->cmi_post_startup(hdl); 511 512 cmi_hdl_rele(hdl); 513 } 514 515 /* 516 * Called just once from start_other_cpus when all processors are started. 517 * This will not be called for each cpu, so the registered op must not 518 * assume it is called as such. 519 */ 520 void 521 cmi_post_mpstartup(void) 522 { 523 cmi_hdl_t hdl; 524 cmi_t *cmi; 525 526 if (cmi_no_mca_init != 0 || 527 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 528 return; 529 530 cmi = HDL2CMI(hdl); 531 532 if (CMI_OP_PRESENT(cmi, cmi_post_mpstartup)) 533 CMI_OPS(cmi)->cmi_post_mpstartup(hdl); 534 535 cmi_hdl_rele(hdl); 536 } 537 538 void 539 cmi_faulted_enter(cmi_hdl_t hdl) 540 { 541 cmi_t *cmi = HDL2CMI(hdl); 542 543 if (cmi_no_mca_init != 0) 544 return; 545 546 if (CMI_OP_PRESENT(cmi, cmi_faulted_enter)) 547 CMI_OPS(cmi)->cmi_faulted_enter(hdl); 548 } 549 550 void 551 cmi_faulted_exit(cmi_hdl_t hdl) 552 { 553 cmi_t *cmi = HDL2CMI(hdl); 554 555 if (cmi_no_mca_init != 0) 556 return; 557 558 if (CMI_OP_PRESENT(cmi, cmi_faulted_exit)) 559 CMI_OPS(cmi)->cmi_faulted_exit(hdl); 560 } 561 562 void 563 cmi_mca_init(cmi_hdl_t hdl) 564 { 565 cmi_t *cmi; 566 567 if (cmi_no_mca_init != 0) 568 return; 569 570 cmi = HDL2CMI(hdl); 571 572 if (CMI_OP_PRESENT(cmi, cmi_mca_init)) 573 CMI_OPS(cmi)->cmi_mca_init(hdl); 574 } 575 576 #define CMI_RESPONSE_PANIC 0x0 /* panic must have value 0 */ 577 #define CMI_RESPONSE_NONE 0x1 578 #define CMI_RESPONSE_CKILL 0x2 579 #define CMI_RESPONSE_REBOOT 0x3 /* not implemented */ 580 #define CMI_RESPONSE_ONTRAP_PROT 0x4 581 #define CMI_RESPONSE_LOFAULT_PROT 0x5 582 583 /* 584 * Return 0 if we will panic in response to this machine check, otherwise 585 * non-zero. If the caller is cmi_mca_trap in this file then the nonzero 586 * return values are to be interpreted from CMI_RESPONSE_* above. 587 * 588 * This function must just return what will be done without actually 589 * doing anything; this includes not changing the regs. 590 */ 591 int 592 cmi_mce_response(struct regs *rp, uint64_t disp) 593 { 594 int panicrsp = cmi_panic_on_uncorrectable_error ? CMI_RESPONSE_PANIC : 595 CMI_RESPONSE_NONE; 596 on_trap_data_t *otp; 597 598 ASSERT(rp != NULL); /* don't call for polling, only on #MC */ 599 600 /* 601 * If no bits are set in the disposition then there is nothing to 602 * worry about and we do not need to trampoline to ontrap or 603 * lofault handlers. 604 */ 605 if (disp == 0) 606 return (CMI_RESPONSE_NONE); 607 608 /* 609 * Unconstrained errors cannot be forgiven, even by ontrap or 610 * lofault protection. The data is not poisoned and may not 611 * even belong to the trapped context - eg a writeback of 612 * data that is found to be bad. 613 */ 614 if (disp & CMI_ERRDISP_UC_UNCONSTRAINED) 615 return (panicrsp); 616 617 /* 618 * ontrap OT_DATA_EC and lofault protection forgive any disposition 619 * other than unconstrained, even those normally forced fatal. 620 */ 621 if ((otp = curthread->t_ontrap) != NULL && otp->ot_prot & OT_DATA_EC) 622 return (CMI_RESPONSE_ONTRAP_PROT); 623 else if (curthread->t_lofault) 624 return (CMI_RESPONSE_LOFAULT_PROT); 625 626 /* 627 * Forced-fatal errors are terminal even in user mode. 628 */ 629 if (disp & CMI_ERRDISP_FORCEFATAL) 630 return (panicrsp); 631 632 /* 633 * If the trapped context is corrupt or we have no instruction pointer 634 * to resume at (and aren't trampolining to a fault handler) 635 * then in the kernel case we must panic and in usermode we 636 * kill the affected contract. 637 */ 638 if (disp & (CMI_ERRDISP_CURCTXBAD | CMI_ERRDISP_RIPV_INVALID)) 639 return (USERMODE(rp->r_cs) ? CMI_RESPONSE_CKILL : panicrsp); 640 641 /* 642 * Anything else is harmless 643 */ 644 return (CMI_RESPONSE_NONE); 645 } 646 647 int cma_mca_trap_panic_suppressed = 0; 648 649 static void 650 cmi_mca_panic(void) 651 { 652 if (cmi_panic_on_uncorrectable_error) { 653 fm_panic("Unrecoverable Machine-Check Exception"); 654 } else { 655 cmn_err(CE_WARN, "suppressing panic from fatal #mc"); 656 cma_mca_trap_panic_suppressed++; 657 } 658 } 659 660 661 int cma_mca_trap_contract_kills = 0; 662 int cma_mca_trap_ontrap_forgiven = 0; 663 int cma_mca_trap_lofault_forgiven = 0; 664 665 /* 666 * Native #MC handler - we branch to here from mcetrap 667 */ 668 /*ARGSUSED*/ 669 void 670 cmi_mca_trap(struct regs *rp) 671 { 672 #ifndef __xpv 673 cmi_hdl_t hdl = NULL; 674 uint64_t disp; 675 cmi_t *cmi; 676 int s; 677 678 if (cmi_no_mca_init != 0) 679 return; 680 681 /* 682 * This function can call cmn_err, and the cpu module cmi_mca_trap 683 * entry point may also elect to call cmn_err (e.g., if it can't 684 * log the error onto an errorq, say very early in boot). 685 * We need to let cprintf know that we must not block. 686 */ 687 s = spl8(); 688 689 if ((hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU), 690 cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) == NULL || 691 (cmi = HDL2CMI(hdl)) == NULL || 692 !CMI_OP_PRESENT(cmi, cmi_mca_trap)) { 693 694 cmn_err(CE_WARN, "#MC exception on cpuid %d: %s", 695 CPU->cpu_id, 696 hdl ? "handle lookup ok but no #MC handler found" : 697 "handle lookup failed"); 698 699 if (hdl != NULL) 700 cmi_hdl_rele(hdl); 701 702 splx(s); 703 return; 704 } 705 706 disp = CMI_OPS(cmi)->cmi_mca_trap(hdl, rp); 707 708 switch (cmi_mce_response(rp, disp)) { 709 default: 710 cmn_err(CE_WARN, "Invalid response from cmi_mce_response"); 711 /*FALLTHRU*/ 712 713 case CMI_RESPONSE_PANIC: 714 cmi_mca_panic(); 715 break; 716 717 case CMI_RESPONSE_NONE: 718 break; 719 720 case CMI_RESPONSE_CKILL: 721 ttolwp(curthread)->lwp_pcb.pcb_flags |= ASYNC_HWERR; 722 aston(curthread); 723 cma_mca_trap_contract_kills++; 724 break; 725 726 case CMI_RESPONSE_ONTRAP_PROT: { 727 on_trap_data_t *otp = curthread->t_ontrap; 728 otp->ot_trap = OT_DATA_EC; 729 rp->r_pc = otp->ot_trampoline; 730 cma_mca_trap_ontrap_forgiven++; 731 break; 732 } 733 734 case CMI_RESPONSE_LOFAULT_PROT: 735 rp->r_r0 = EFAULT; 736 rp->r_pc = curthread->t_lofault; 737 cma_mca_trap_lofault_forgiven++; 738 break; 739 } 740 741 cmi_hdl_rele(hdl); 742 splx(s); 743 #endif /* __xpv */ 744 } 745 746 void 747 cmi_hdl_poke(cmi_hdl_t hdl) 748 { 749 cmi_t *cmi = HDL2CMI(hdl); 750 751 if (!CMI_OP_PRESENT(cmi, cmi_hdl_poke)) 752 return; 753 754 CMI_OPS(cmi)->cmi_hdl_poke(hdl); 755 } 756 757 void 758 cmi_mc_register(cmi_hdl_t hdl, const cmi_mc_ops_t *mcops, void *mcdata) 759 { 760 if (!cmi_no_mca_init) 761 cmi_hdl_setmc(hdl, mcops, mcdata); 762 } 763 764 cmi_errno_t 765 cmi_mc_patounum(uint64_t pa, uint8_t valid_hi, uint8_t valid_lo, uint32_t synd, 766 int syndtype, mc_unum_t *up) 767 { 768 const struct cmi_mc_ops *mcops; 769 cmi_hdl_t hdl; 770 cmi_errno_t rv; 771 772 if (cmi_no_mca_init || 773 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 774 return (CMIERR_MC_ABSENT); 775 776 if ((mcops = cmi_hdl_getmcops(hdl)) == NULL || 777 mcops->cmi_mc_patounum == NULL) { 778 cmi_hdl_rele(hdl); 779 return (CMIERR_MC_NOTSUP); 780 } 781 782 rv = mcops->cmi_mc_patounum(cmi_hdl_getmcdata(hdl), pa, valid_hi, 783 valid_lo, synd, syndtype, up); 784 785 cmi_hdl_rele(hdl); 786 787 return (rv); 788 } 789 790 cmi_errno_t 791 cmi_mc_unumtopa(mc_unum_t *up, nvlist_t *nvl, uint64_t *pap) 792 { 793 const struct cmi_mc_ops *mcops; 794 cmi_hdl_t hdl; 795 cmi_errno_t rv; 796 797 if (up != NULL && nvl != NULL) 798 return (CMIERR_API); /* convert from just one form */ 799 800 if (cmi_no_mca_init || 801 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 802 return (CMIERR_MC_ABSENT); 803 804 if ((mcops = cmi_hdl_getmcops(hdl)) == NULL || 805 mcops->cmi_mc_unumtopa == NULL) { 806 cmi_hdl_rele(hdl); 807 808 if (nvl != NULL && nvlist_lookup_uint64(nvl, 809 FM_FMRI_MEM_PHYSADDR, pap) == 0) { 810 return (CMIERR_MC_PARTIALUNUMTOPA); 811 } else { 812 return (mcops && mcops->cmi_mc_unumtopa ? 813 CMIERR_MC_NOTSUP : CMIERR_MC_ABSENT); 814 } 815 } 816 817 rv = mcops->cmi_mc_unumtopa(cmi_hdl_getmcdata(hdl), up, nvl, pap); 818 819 cmi_hdl_rele(hdl); 820 821 return (rv); 822 } 823 824 void 825 cmi_mc_logout(cmi_hdl_t hdl, boolean_t ismc, boolean_t sync) 826 { 827 const struct cmi_mc_ops *mcops; 828 829 if (cmi_no_mca_init || (mcops = cmi_hdl_getmcops(hdl)) == NULL) 830 return; 831 832 if (mcops->cmi_mc_logout != NULL) 833 mcops->cmi_mc_logout(hdl, ismc, sync); 834 } 835 836 cmi_errno_t 837 cmi_hdl_msrinject(cmi_hdl_t hdl, cmi_mca_regs_t *regs, uint_t nregs, 838 int force) 839 { 840 cmi_t *cmi = cmi_hdl_getcmi(hdl); 841 842 if (!CMI_OP_PRESENT(cmi, cmi_msrinject)) 843 return (CMIERR_NOTSUP); 844 845 return (CMI_OPS(cmi)->cmi_msrinject(hdl, regs, nregs, force)); 846 } 847 848 boolean_t 849 cmi_panic_on_ue(void) 850 { 851 return (cmi_panic_on_uncorrectable_error ? B_TRUE : B_FALSE); 852 } 853