1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Public interface to routines implemented by CPU modules 29 */ 30 31 #include <sys/types.h> 32 #include <sys/atomic.h> 33 #include <sys/x86_archext.h> 34 #include <sys/cpu_module_impl.h> 35 #include <sys/cpu_module_ms.h> 36 #include <sys/fm/util.h> 37 #include <sys/reboot.h> 38 #include <sys/modctl.h> 39 #include <sys/param.h> 40 #include <sys/cmn_err.h> 41 #include <sys/systm.h> 42 #include <sys/fm/protocol.h> 43 #include <sys/pcb.h> 44 #include <sys/ontrap.h> 45 #include <sys/psw.h> 46 #include <sys/privregs.h> 47 #include <sys/machsystm.h> 48 49 /* 50 * Set to force cmi_init to fail. 51 */ 52 int cmi_no_init = 0; 53 54 /* 55 * Set to avoid MCA initialization. 56 */ 57 int cmi_no_mca_init = 0; 58 59 /* 60 * If cleared for debugging we will not attempt to load a model-specific 61 * cpu module but will load the generic cpu module instead. 62 */ 63 int cmi_force_generic = 0; 64 65 /* 66 * If cleared for debugging, we will suppress panicking on fatal hardware 67 * errors. This should *only* be used for debugging; it use can and will 68 * cause data corruption if actual hardware errors are detected by the system. 69 */ 70 int cmi_panic_on_uncorrectable_error = 1; 71 72 #ifndef __xpv 73 /* 74 * Set to indicate whether we are able to enable cmci interrupt. 75 */ 76 int cmi_enable_cmci = 0; 77 #endif 78 79 /* 80 * Subdirectory (relative to the module search path) in which we will 81 * look for cpu modules. 82 */ 83 #define CPUMOD_SUBDIR "cpu" 84 85 /* 86 * CPU modules have a filenames such as "cpu.AuthenticAMD.15" and 87 * "cpu.generic" - the "cpu" prefix is specified by the following. 88 */ 89 #define CPUMOD_PREFIX "cpu" 90 91 /* 92 * Structure used to keep track of cpu modules we have loaded and their ops 93 */ 94 typedef struct cmi { 95 struct cmi *cmi_next; 96 struct cmi *cmi_prev; 97 const cmi_ops_t *cmi_ops; 98 struct modctl *cmi_modp; 99 uint_t cmi_refcnt; 100 } cmi_t; 101 102 static cmi_t *cmi_list; 103 static kmutex_t cmi_load_lock; 104 105 /* 106 * Functions we need from cmi_hw.c that are not part of the cpu_module.h 107 * interface. 108 */ 109 extern cmi_hdl_t cmi_hdl_create(enum cmi_hdl_class, uint_t, uint_t, uint_t); 110 extern void cmi_hdl_setcmi(cmi_hdl_t, void *, void *); 111 extern void *cmi_hdl_getcmi(cmi_hdl_t); 112 extern void cmi_hdl_setmc(cmi_hdl_t, const struct cmi_mc_ops *, void *); 113 extern void cmi_hdl_inj_begin(cmi_hdl_t); 114 extern void cmi_hdl_inj_end(cmi_hdl_t); 115 extern void cmi_read_smbios(cmi_hdl_t); 116 117 #define HDL2CMI(hdl) cmi_hdl_getcmi(hdl) 118 119 #define CMI_OPS(cmi) (cmi)->cmi_ops 120 #define CMI_OP_PRESENT(cmi, op) ((cmi) && CMI_OPS(cmi)->op != NULL) 121 122 #define CMI_MATCH_VENDOR 0 /* Just match on vendor */ 123 #define CMI_MATCH_FAMILY 1 /* Match down to family */ 124 #define CMI_MATCH_MODEL 2 /* Match down to model */ 125 #define CMI_MATCH_STEPPING 3 /* Match down to stepping */ 126 127 static void 128 cmi_link(cmi_t *cmi) 129 { 130 ASSERT(MUTEX_HELD(&cmi_load_lock)); 131 132 cmi->cmi_prev = NULL; 133 cmi->cmi_next = cmi_list; 134 if (cmi_list != NULL) 135 cmi_list->cmi_prev = cmi; 136 cmi_list = cmi; 137 } 138 139 static void 140 cmi_unlink(cmi_t *cmi) 141 { 142 ASSERT(MUTEX_HELD(&cmi_load_lock)); 143 ASSERT(cmi->cmi_refcnt == 0); 144 145 if (cmi->cmi_prev != NULL) 146 cmi->cmi_prev = cmi->cmi_next; 147 148 if (cmi->cmi_next != NULL) 149 cmi->cmi_next->cmi_prev = cmi->cmi_prev; 150 151 if (cmi_list == cmi) 152 cmi_list = cmi->cmi_next; 153 } 154 155 /* 156 * Hold the module in memory. We call to CPU modules without using the 157 * stubs mechanism, so these modules must be manually held in memory. 158 * The mod_ref acts as if another loaded module has a dependency on us. 159 */ 160 static void 161 cmi_hold(cmi_t *cmi) 162 { 163 ASSERT(MUTEX_HELD(&cmi_load_lock)); 164 165 mutex_enter(&mod_lock); 166 cmi->cmi_modp->mod_ref++; 167 mutex_exit(&mod_lock); 168 cmi->cmi_refcnt++; 169 } 170 171 static void 172 cmi_rele(cmi_t *cmi) 173 { 174 ASSERT(MUTEX_HELD(&cmi_load_lock)); 175 176 mutex_enter(&mod_lock); 177 cmi->cmi_modp->mod_ref--; 178 mutex_exit(&mod_lock); 179 180 if (--cmi->cmi_refcnt == 0) { 181 cmi_unlink(cmi); 182 kmem_free(cmi, sizeof (cmi_t)); 183 } 184 } 185 186 static cmi_ops_t * 187 cmi_getops(modctl_t *modp) 188 { 189 cmi_ops_t *ops; 190 191 if ((ops = (cmi_ops_t *)modlookup_by_modctl(modp, "_cmi_ops")) == 192 NULL) { 193 cmn_err(CE_WARN, "cpu module '%s' is invalid: no _cmi_ops " 194 "found", modp->mod_modname); 195 return (NULL); 196 } 197 198 if (ops->cmi_init == NULL) { 199 cmn_err(CE_WARN, "cpu module '%s' is invalid: no cmi_init " 200 "entry point", modp->mod_modname); 201 return (NULL); 202 } 203 204 return (ops); 205 } 206 207 static cmi_t * 208 cmi_load_modctl(modctl_t *modp) 209 { 210 cmi_ops_t *ops; 211 uintptr_t ver; 212 cmi_t *cmi; 213 cmi_api_ver_t apiver; 214 215 ASSERT(MUTEX_HELD(&cmi_load_lock)); 216 217 for (cmi = cmi_list; cmi != NULL; cmi = cmi->cmi_next) { 218 if (cmi->cmi_modp == modp) 219 return (cmi); 220 } 221 222 if ((ver = modlookup_by_modctl(modp, "_cmi_api_version")) == NULL) { 223 /* 224 * Apparently a cpu module before versioning was introduced - 225 * we call this version 0. 226 */ 227 apiver = CMI_API_VERSION_0; 228 } else { 229 apiver = *((cmi_api_ver_t *)ver); 230 if (!CMI_API_VERSION_CHKMAGIC(apiver)) { 231 cmn_err(CE_WARN, "cpu module '%s' is invalid: " 232 "_cmi_api_version 0x%x has bad magic", 233 modp->mod_modname, apiver); 234 return (NULL); 235 } 236 } 237 238 if (apiver != CMI_API_VERSION) { 239 cmn_err(CE_WARN, "cpu module '%s' has API version %d, " 240 "kernel requires API version %d", modp->mod_modname, 241 CMI_API_VERSION_TOPRINT(apiver), 242 CMI_API_VERSION_TOPRINT(CMI_API_VERSION)); 243 return (NULL); 244 } 245 246 if ((ops = cmi_getops(modp)) == NULL) 247 return (NULL); 248 249 cmi = kmem_zalloc(sizeof (*cmi), KM_SLEEP); 250 cmi->cmi_ops = ops; 251 cmi->cmi_modp = modp; 252 253 cmi_link(cmi); 254 255 return (cmi); 256 } 257 258 static int 259 cmi_cpu_match(cmi_hdl_t hdl1, cmi_hdl_t hdl2, int match) 260 { 261 if (match >= CMI_MATCH_VENDOR && 262 cmi_hdl_vendor(hdl1) != cmi_hdl_vendor(hdl2)) 263 return (0); 264 265 if (match >= CMI_MATCH_FAMILY && 266 cmi_hdl_family(hdl1) != cmi_hdl_family(hdl2)) 267 return (0); 268 269 if (match >= CMI_MATCH_MODEL && 270 cmi_hdl_model(hdl1) != cmi_hdl_model(hdl2)) 271 return (0); 272 273 if (match >= CMI_MATCH_STEPPING && 274 cmi_hdl_stepping(hdl1) != cmi_hdl_stepping(hdl2)) 275 return (0); 276 277 return (1); 278 } 279 280 static int 281 cmi_search_list_cb(cmi_hdl_t whdl, void *arg1, void *arg2, void *arg3) 282 { 283 cmi_hdl_t thdl = (cmi_hdl_t)arg1; 284 int match = *((int *)arg2); 285 cmi_hdl_t *rsltp = (cmi_hdl_t *)arg3; 286 287 if (cmi_cpu_match(thdl, whdl, match)) { 288 cmi_hdl_hold(whdl); /* short-term hold */ 289 *rsltp = whdl; 290 return (CMI_HDL_WALK_DONE); 291 } else { 292 return (CMI_HDL_WALK_NEXT); 293 } 294 } 295 296 static cmi_t * 297 cmi_search_list(cmi_hdl_t hdl, int match) 298 { 299 cmi_hdl_t dhdl = NULL; 300 cmi_t *cmi = NULL; 301 302 ASSERT(MUTEX_HELD(&cmi_load_lock)); 303 304 cmi_hdl_walk(cmi_search_list_cb, (void *)hdl, (void *)&match, &dhdl); 305 if (dhdl) { 306 cmi = HDL2CMI(dhdl); 307 cmi_hdl_rele(dhdl); /* held in cmi_search_list_cb */ 308 } 309 310 return (cmi); 311 } 312 313 static cmi_t * 314 cmi_load_module(cmi_hdl_t hdl, int match, int *chosenp) 315 { 316 modctl_t *modp; 317 cmi_t *cmi; 318 int modid; 319 uint_t s[3]; 320 321 ASSERT(MUTEX_HELD(&cmi_load_lock)); 322 ASSERT(match == CMI_MATCH_STEPPING || match == CMI_MATCH_MODEL || 323 match == CMI_MATCH_FAMILY || match == CMI_MATCH_VENDOR); 324 325 /* 326 * Have we already loaded a module for a cpu with the same 327 * vendor/family/model/stepping? 328 */ 329 if ((cmi = cmi_search_list(hdl, match)) != NULL) { 330 cmi_hold(cmi); 331 return (cmi); 332 } 333 334 s[0] = cmi_hdl_family(hdl); 335 s[1] = cmi_hdl_model(hdl); 336 s[2] = cmi_hdl_stepping(hdl); 337 modid = modload_qualified(CPUMOD_SUBDIR, CPUMOD_PREFIX, 338 cmi_hdl_vendorstr(hdl), ".", s, match, chosenp); 339 340 if (modid == -1) 341 return (NULL); 342 343 modp = mod_hold_by_id(modid); 344 cmi = cmi_load_modctl(modp); 345 if (cmi) 346 cmi_hold(cmi); 347 mod_release_mod(modp); 348 349 return (cmi); 350 } 351 352 /* 353 * Try to load a cpu module with specific support for this chip type. 354 */ 355 static cmi_t * 356 cmi_load_specific(cmi_hdl_t hdl, void **datap) 357 { 358 cmi_t *cmi; 359 int err; 360 int i; 361 362 ASSERT(MUTEX_HELD(&cmi_load_lock)); 363 364 for (i = CMI_MATCH_STEPPING; i >= CMI_MATCH_VENDOR; i--) { 365 int suffixlevel; 366 367 if ((cmi = cmi_load_module(hdl, i, &suffixlevel)) == NULL) 368 return (NULL); 369 370 /* 371 * A module has loaded and has a _cmi_ops structure, and the 372 * module has been held for this instance. Call its cmi_init 373 * entry point - we expect success (0) or ENOTSUP. 374 */ 375 if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) == 0) { 376 if (boothowto & RB_VERBOSE) { 377 printf("initialized cpu module '%s' on " 378 "chip %d core %d strand %d\n", 379 cmi->cmi_modp->mod_modname, 380 cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl), 381 cmi_hdl_strandid(hdl)); 382 } 383 return (cmi); 384 } else if (err != ENOTSUP) { 385 cmn_err(CE_WARN, "failed to init cpu module '%s' on " 386 "chip %d core %d strand %d: err=%d\n", 387 cmi->cmi_modp->mod_modname, 388 cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl), 389 cmi_hdl_strandid(hdl), err); 390 } 391 392 /* 393 * The module failed or declined to init, so release 394 * it and update i to be equal to the number 395 * of suffices actually used in the last module path. 396 */ 397 cmi_rele(cmi); 398 i = suffixlevel; 399 } 400 401 return (NULL); 402 } 403 404 /* 405 * Load the generic IA32 MCA cpu module, which may still supplement 406 * itself with model-specific support through cpu model-specific modules. 407 */ 408 static cmi_t * 409 cmi_load_generic(cmi_hdl_t hdl, void **datap) 410 { 411 modctl_t *modp; 412 cmi_t *cmi; 413 int modid; 414 int err; 415 416 ASSERT(MUTEX_HELD(&cmi_load_lock)); 417 418 if ((modid = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic")) == -1) 419 return (NULL); 420 421 modp = mod_hold_by_id(modid); 422 cmi = cmi_load_modctl(modp); 423 if (cmi) 424 cmi_hold(cmi); 425 mod_release_mod(modp); 426 427 if (cmi == NULL) 428 return (NULL); 429 430 if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) != 0) { 431 if (err != ENOTSUP) 432 cmn_err(CE_WARN, CPUMOD_PREFIX ".generic failed to " 433 "init: err=%d", err); 434 cmi_rele(cmi); 435 return (NULL); 436 } 437 438 return (cmi); 439 } 440 441 cmi_hdl_t 442 cmi_init(enum cmi_hdl_class class, uint_t chipid, uint_t coreid, 443 uint_t strandid) 444 { 445 cmi_t *cmi = NULL; 446 cmi_hdl_t hdl; 447 void *data; 448 449 if (cmi_no_init) { 450 cmi_no_mca_init = 1; 451 return (NULL); 452 } 453 454 mutex_enter(&cmi_load_lock); 455 456 if ((hdl = cmi_hdl_create(class, chipid, coreid, strandid)) == NULL) { 457 mutex_exit(&cmi_load_lock); 458 cmn_err(CE_WARN, "There will be no MCA support on chip %d " 459 "core %d strand %d (cmi_hdl_create returned NULL)\n", 460 chipid, coreid, strandid); 461 return (NULL); 462 } 463 464 if (!cmi_force_generic) 465 cmi = cmi_load_specific(hdl, &data); 466 467 if (cmi == NULL && (cmi = cmi_load_generic(hdl, &data)) == NULL) { 468 cmn_err(CE_WARN, "There will be no MCA support on chip %d " 469 "core %d strand %d\n", chipid, coreid, strandid); 470 cmi_hdl_rele(hdl); 471 mutex_exit(&cmi_load_lock); 472 return (NULL); 473 } 474 475 cmi_hdl_setcmi(hdl, cmi, data); 476 477 cms_init(hdl); 478 479 cmi_read_smbios(hdl); 480 481 mutex_exit(&cmi_load_lock); 482 483 return (hdl); 484 } 485 486 /* 487 * cmi_fini is not called at the moment. It is intended to be called 488 * on DR deconfigure of a cpu resource. It should not be called at 489 * simple offline of a cpu. 490 */ 491 void 492 cmi_fini(cmi_hdl_t hdl) 493 { 494 cmi_t *cmi = HDL2CMI(hdl); 495 496 if (cms_present(hdl)) 497 cms_fini(hdl); 498 499 if (CMI_OP_PRESENT(cmi, cmi_fini)) 500 CMI_OPS(cmi)->cmi_fini(hdl); 501 502 cmi_hdl_rele(hdl); /* release hold obtained in cmi_hdl_create */ 503 } 504 505 /* 506 * cmi_post_startup is called from post_startup for the boot cpu only (no 507 * other cpus are started yet). 508 */ 509 void 510 cmi_post_startup(void) 511 { 512 cmi_hdl_t hdl; 513 cmi_t *cmi; 514 515 if (cmi_no_mca_init != 0 || 516 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 517 return; 518 519 cmi = HDL2CMI(hdl); 520 521 if (CMI_OP_PRESENT(cmi, cmi_post_startup)) 522 CMI_OPS(cmi)->cmi_post_startup(hdl); 523 524 cmi_hdl_rele(hdl); 525 } 526 527 /* 528 * Called just once from start_other_cpus when all processors are started. 529 * This will not be called for each cpu, so the registered op must not 530 * assume it is called as such. We are not necessarily executing on 531 * the boot cpu. 532 */ 533 void 534 cmi_post_mpstartup(void) 535 { 536 cmi_hdl_t hdl; 537 cmi_t *cmi; 538 539 if (cmi_no_mca_init != 0 || 540 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 541 return; 542 543 cmi = HDL2CMI(hdl); 544 545 if (CMI_OP_PRESENT(cmi, cmi_post_mpstartup)) 546 CMI_OPS(cmi)->cmi_post_mpstartup(hdl); 547 548 cmi_hdl_rele(hdl); 549 } 550 551 void 552 cmi_faulted_enter(cmi_hdl_t hdl) 553 { 554 cmi_t *cmi = HDL2CMI(hdl); 555 556 if (cmi_no_mca_init != 0) 557 return; 558 559 if (CMI_OP_PRESENT(cmi, cmi_faulted_enter)) 560 CMI_OPS(cmi)->cmi_faulted_enter(hdl); 561 } 562 563 void 564 cmi_faulted_exit(cmi_hdl_t hdl) 565 { 566 cmi_t *cmi = HDL2CMI(hdl); 567 568 if (cmi_no_mca_init != 0) 569 return; 570 571 if (CMI_OP_PRESENT(cmi, cmi_faulted_exit)) 572 CMI_OPS(cmi)->cmi_faulted_exit(hdl); 573 } 574 575 void 576 cmi_mca_init(cmi_hdl_t hdl) 577 { 578 cmi_t *cmi; 579 580 if (cmi_no_mca_init != 0) 581 return; 582 583 cmi = HDL2CMI(hdl); 584 585 if (CMI_OP_PRESENT(cmi, cmi_mca_init)) 586 CMI_OPS(cmi)->cmi_mca_init(hdl); 587 } 588 589 #define CMI_RESPONSE_PANIC 0x0 /* panic must have value 0 */ 590 #define CMI_RESPONSE_NONE 0x1 591 #define CMI_RESPONSE_CKILL 0x2 592 #define CMI_RESPONSE_REBOOT 0x3 /* not implemented */ 593 #define CMI_RESPONSE_ONTRAP_PROT 0x4 594 #define CMI_RESPONSE_LOFAULT_PROT 0x5 595 596 /* 597 * Return 0 if we will panic in response to this machine check, otherwise 598 * non-zero. If the caller is cmi_mca_trap in this file then the nonzero 599 * return values are to be interpreted from CMI_RESPONSE_* above. 600 * 601 * This function must just return what will be done without actually 602 * doing anything; this includes not changing the regs. 603 */ 604 int 605 cmi_mce_response(struct regs *rp, uint64_t disp) 606 { 607 int panicrsp = cmi_panic_on_uncorrectable_error ? CMI_RESPONSE_PANIC : 608 CMI_RESPONSE_NONE; 609 on_trap_data_t *otp; 610 611 ASSERT(rp != NULL); /* don't call for polling, only on #MC */ 612 613 /* 614 * If no bits are set in the disposition then there is nothing to 615 * worry about and we do not need to trampoline to ontrap or 616 * lofault handlers. 617 */ 618 if (disp == 0) 619 return (CMI_RESPONSE_NONE); 620 621 /* 622 * Unconstrained errors cannot be forgiven, even by ontrap or 623 * lofault protection. The data is not poisoned and may not 624 * even belong to the trapped context - eg a writeback of 625 * data that is found to be bad. 626 */ 627 if (disp & CMI_ERRDISP_UC_UNCONSTRAINED) 628 return (panicrsp); 629 630 /* 631 * ontrap OT_DATA_EC and lofault protection forgive any disposition 632 * other than unconstrained, even those normally forced fatal. 633 */ 634 if ((otp = curthread->t_ontrap) != NULL && otp->ot_prot & OT_DATA_EC) 635 return (CMI_RESPONSE_ONTRAP_PROT); 636 else if (curthread->t_lofault) 637 return (CMI_RESPONSE_LOFAULT_PROT); 638 639 /* 640 * Forced-fatal errors are terminal even in user mode. 641 */ 642 if (disp & CMI_ERRDISP_FORCEFATAL) 643 return (panicrsp); 644 645 /* 646 * If the trapped context is corrupt or we have no instruction pointer 647 * to resume at (and aren't trampolining to a fault handler) 648 * then in the kernel case we must panic and in usermode we 649 * kill the affected contract. 650 */ 651 if (disp & (CMI_ERRDISP_CURCTXBAD | CMI_ERRDISP_RIPV_INVALID)) 652 return (USERMODE(rp->r_cs) ? CMI_RESPONSE_CKILL : panicrsp); 653 654 /* 655 * Anything else is harmless 656 */ 657 return (CMI_RESPONSE_NONE); 658 } 659 660 int cma_mca_trap_panic_suppressed = 0; 661 662 static void 663 cmi_mca_panic(void) 664 { 665 if (cmi_panic_on_uncorrectable_error) { 666 fm_panic("Unrecoverable Machine-Check Exception"); 667 } else { 668 cmn_err(CE_WARN, "suppressing panic from fatal #mc"); 669 cma_mca_trap_panic_suppressed++; 670 } 671 } 672 673 674 int cma_mca_trap_contract_kills = 0; 675 int cma_mca_trap_ontrap_forgiven = 0; 676 int cma_mca_trap_lofault_forgiven = 0; 677 678 /* 679 * Native #MC handler - we branch to here from mcetrap 680 */ 681 /*ARGSUSED*/ 682 void 683 cmi_mca_trap(struct regs *rp) 684 { 685 #ifndef __xpv 686 cmi_hdl_t hdl = NULL; 687 uint64_t disp; 688 cmi_t *cmi; 689 int s; 690 691 if (cmi_no_mca_init != 0) 692 return; 693 694 /* 695 * This function can call cmn_err, and the cpu module cmi_mca_trap 696 * entry point may also elect to call cmn_err (e.g., if it can't 697 * log the error onto an errorq, say very early in boot). 698 * We need to let cprintf know that we must not block. 699 */ 700 s = spl8(); 701 702 if ((hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU), 703 cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) == NULL || 704 (cmi = HDL2CMI(hdl)) == NULL || 705 !CMI_OP_PRESENT(cmi, cmi_mca_trap)) { 706 707 cmn_err(CE_WARN, "#MC exception on cpuid %d: %s", 708 CPU->cpu_id, 709 hdl ? "handle lookup ok but no #MC handler found" : 710 "handle lookup failed"); 711 712 if (hdl != NULL) 713 cmi_hdl_rele(hdl); 714 715 splx(s); 716 return; 717 } 718 719 disp = CMI_OPS(cmi)->cmi_mca_trap(hdl, rp); 720 721 switch (cmi_mce_response(rp, disp)) { 722 default: 723 cmn_err(CE_WARN, "Invalid response from cmi_mce_response"); 724 /*FALLTHRU*/ 725 726 case CMI_RESPONSE_PANIC: 727 cmi_mca_panic(); 728 break; 729 730 case CMI_RESPONSE_NONE: 731 break; 732 733 case CMI_RESPONSE_CKILL: 734 ttolwp(curthread)->lwp_pcb.pcb_flags |= ASYNC_HWERR; 735 aston(curthread); 736 cma_mca_trap_contract_kills++; 737 break; 738 739 case CMI_RESPONSE_ONTRAP_PROT: { 740 on_trap_data_t *otp = curthread->t_ontrap; 741 otp->ot_trap = OT_DATA_EC; 742 rp->r_pc = otp->ot_trampoline; 743 cma_mca_trap_ontrap_forgiven++; 744 break; 745 } 746 747 case CMI_RESPONSE_LOFAULT_PROT: 748 rp->r_r0 = EFAULT; 749 rp->r_pc = curthread->t_lofault; 750 cma_mca_trap_lofault_forgiven++; 751 break; 752 } 753 754 cmi_hdl_rele(hdl); 755 splx(s); 756 #endif /* __xpv */ 757 } 758 759 void 760 cmi_hdl_poke(cmi_hdl_t hdl) 761 { 762 cmi_t *cmi = HDL2CMI(hdl); 763 764 if (!CMI_OP_PRESENT(cmi, cmi_hdl_poke)) 765 return; 766 767 CMI_OPS(cmi)->cmi_hdl_poke(hdl); 768 } 769 770 #ifndef __xpv 771 void 772 cmi_cmci_trap() 773 { 774 cmi_hdl_t hdl = NULL; 775 cmi_t *cmi; 776 777 if (cmi_no_mca_init != 0) 778 return; 779 780 if ((hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU), 781 cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) == NULL || 782 (cmi = HDL2CMI(hdl)) == NULL || 783 !CMI_OP_PRESENT(cmi, cmi_cmci_trap)) { 784 785 cmn_err(CE_WARN, "CMCI interrupt on cpuid %d: %s", 786 CPU->cpu_id, 787 hdl ? "handle lookup ok but no CMCI handler found" : 788 "handle lookup failed"); 789 790 if (hdl != NULL) 791 cmi_hdl_rele(hdl); 792 793 return; 794 } 795 796 CMI_OPS(cmi)->cmi_cmci_trap(hdl); 797 798 cmi_hdl_rele(hdl); 799 } 800 #endif /* __xpv */ 801 802 void 803 cmi_mc_register(cmi_hdl_t hdl, const cmi_mc_ops_t *mcops, void *mcdata) 804 { 805 if (!cmi_no_mca_init) 806 cmi_hdl_setmc(hdl, mcops, mcdata); 807 } 808 809 void 810 cmi_mc_sw_memscrub_disable(void) 811 { 812 memscrub_disable(); 813 } 814 815 cmi_errno_t 816 cmi_mc_patounum(uint64_t pa, uint8_t valid_hi, uint8_t valid_lo, uint32_t synd, 817 int syndtype, mc_unum_t *up) 818 { 819 const struct cmi_mc_ops *mcops; 820 cmi_hdl_t hdl; 821 cmi_errno_t rv; 822 823 if (cmi_no_mca_init || 824 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 825 return (CMIERR_MC_ABSENT); 826 827 if ((mcops = cmi_hdl_getmcops(hdl)) == NULL || 828 mcops->cmi_mc_patounum == NULL) { 829 cmi_hdl_rele(hdl); 830 return (CMIERR_MC_NOTSUP); 831 } 832 833 rv = mcops->cmi_mc_patounum(cmi_hdl_getmcdata(hdl), pa, valid_hi, 834 valid_lo, synd, syndtype, up); 835 836 cmi_hdl_rele(hdl); 837 838 return (rv); 839 } 840 841 cmi_errno_t 842 cmi_mc_unumtopa(mc_unum_t *up, nvlist_t *nvl, uint64_t *pap) 843 { 844 const struct cmi_mc_ops *mcops; 845 cmi_hdl_t hdl; 846 cmi_errno_t rv; 847 nvlist_t *hcsp; 848 849 if (up != NULL && nvl != NULL) 850 return (CMIERR_API); /* convert from just one form */ 851 852 if (cmi_no_mca_init || 853 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 854 return (CMIERR_MC_ABSENT); 855 856 if ((mcops = cmi_hdl_getmcops(hdl)) == NULL || 857 mcops->cmi_mc_unumtopa == NULL) { 858 cmi_hdl_rele(hdl); 859 860 if (nvl != NULL && nvlist_lookup_nvlist(nvl, 861 FM_FMRI_HC_SPECIFIC, &hcsp) == 0 && 862 (nvlist_lookup_uint64(hcsp, 863 "asru-" FM_FMRI_HC_SPECIFIC_PHYSADDR, pap) == 0 || 864 nvlist_lookup_uint64(hcsp, FM_FMRI_HC_SPECIFIC_PHYSADDR, 865 pap) == 0)) { 866 return (CMIERR_MC_PARTIALUNUMTOPA); 867 } else { 868 return (mcops && mcops->cmi_mc_unumtopa ? 869 CMIERR_MC_NOTSUP : CMIERR_MC_ABSENT); 870 } 871 } 872 873 rv = mcops->cmi_mc_unumtopa(cmi_hdl_getmcdata(hdl), up, nvl, pap); 874 875 cmi_hdl_rele(hdl); 876 877 return (rv); 878 } 879 880 void 881 cmi_mc_logout(cmi_hdl_t hdl, boolean_t ismc, boolean_t sync) 882 { 883 const struct cmi_mc_ops *mcops; 884 885 if (cmi_no_mca_init || (mcops = cmi_hdl_getmcops(hdl)) == NULL) 886 return; 887 888 if (mcops->cmi_mc_logout != NULL) 889 mcops->cmi_mc_logout(hdl, ismc, sync); 890 } 891 892 cmi_errno_t 893 cmi_hdl_msrinject(cmi_hdl_t hdl, cmi_mca_regs_t *regs, uint_t nregs, 894 int force) 895 { 896 cmi_t *cmi = cmi_hdl_getcmi(hdl); 897 cmi_errno_t rc; 898 899 if (!CMI_OP_PRESENT(cmi, cmi_msrinject)) 900 return (CMIERR_NOTSUP); 901 902 cmi_hdl_inj_begin(hdl); 903 rc = CMI_OPS(cmi)->cmi_msrinject(hdl, regs, nregs, force); 904 cmi_hdl_inj_end(hdl); 905 906 return (rc); 907 } 908 909 boolean_t 910 cmi_panic_on_ue(void) 911 { 912 return (cmi_panic_on_uncorrectable_error ? B_TRUE : B_FALSE); 913 } 914 915 void 916 cmi_panic_callback(void) 917 { 918 cmi_hdl_t hdl; 919 cmi_t *cmi; 920 921 if (cmi_no_mca_init || (hdl = cmi_hdl_any()) == NULL) 922 return; 923 924 cmi = cmi_hdl_getcmi(hdl); 925 if (CMI_OP_PRESENT(cmi, cmi_panic_callback)) 926 CMI_OPS(cmi)->cmi_panic_callback(); 927 928 cmi_hdl_rele(hdl); 929 } 930