1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Public interface to routines implemented by CPU modules 29 */ 30 31 #include <sys/types.h> 32 #include <sys/atomic.h> 33 #include <sys/x86_archext.h> 34 #include <sys/cpu_module_impl.h> 35 #include <sys/cpu_module_ms.h> 36 #include <sys/fm/util.h> 37 #include <sys/reboot.h> 38 #include <sys/modctl.h> 39 #include <sys/param.h> 40 #include <sys/cmn_err.h> 41 #include <sys/systm.h> 42 #include <sys/fm/protocol.h> 43 #include <sys/pcb.h> 44 #include <sys/ontrap.h> 45 #include <sys/psw.h> 46 #include <sys/privregs.h> 47 #include <sys/machsystm.h> 48 49 /* 50 * Set to force cmi_init to fail. 51 */ 52 int cmi_no_init = 0; 53 54 /* 55 * Set to avoid MCA initialization. 56 */ 57 int cmi_no_mca_init = 0; 58 59 /* 60 * If cleared for debugging we will not attempt to load a model-specific 61 * cpu module but will load the generic cpu module instead. 62 */ 63 int cmi_force_generic = 0; 64 65 /* 66 * If cleared for debugging, we will suppress panicking on fatal hardware 67 * errors. This should *only* be used for debugging; it use can and will 68 * cause data corruption if actual hardware errors are detected by the system. 69 */ 70 int cmi_panic_on_uncorrectable_error = 1; 71 72 #ifndef __xpv 73 /* 74 * Set to indicate whether we are able to enable cmci interrupt. 75 */ 76 int cmi_enable_cmci = 0; 77 #endif 78 79 /* 80 * Subdirectory (relative to the module search path) in which we will 81 * look for cpu modules. 82 */ 83 #define CPUMOD_SUBDIR "cpu" 84 85 /* 86 * CPU modules have a filenames such as "cpu.AuthenticAMD.15" and 87 * "cpu.generic" - the "cpu" prefix is specified by the following. 88 */ 89 #define CPUMOD_PREFIX "cpu" 90 91 /* 92 * Structure used to keep track of cpu modules we have loaded and their ops 93 */ 94 typedef struct cmi { 95 struct cmi *cmi_next; 96 struct cmi *cmi_prev; 97 const cmi_ops_t *cmi_ops; 98 struct modctl *cmi_modp; 99 uint_t cmi_refcnt; 100 } cmi_t; 101 102 static cmi_t *cmi_list; 103 static kmutex_t cmi_load_lock; 104 105 /* 106 * Functions we need from cmi_hw.c that are not part of the cpu_module.h 107 * interface. 108 */ 109 extern cmi_hdl_t cmi_hdl_create(enum cmi_hdl_class, uint_t, uint_t, uint_t); 110 extern void cmi_hdl_setcmi(cmi_hdl_t, void *, void *); 111 extern void *cmi_hdl_getcmi(cmi_hdl_t); 112 extern void cmi_hdl_setmc(cmi_hdl_t, const struct cmi_mc_ops *, void *); 113 extern void cmi_hdl_inj_begin(cmi_hdl_t); 114 extern void cmi_hdl_inj_end(cmi_hdl_t); 115 116 #define HDL2CMI(hdl) cmi_hdl_getcmi(hdl) 117 118 #define CMI_OPS(cmi) (cmi)->cmi_ops 119 #define CMI_OP_PRESENT(cmi, op) ((cmi) && CMI_OPS(cmi)->op != NULL) 120 121 #define CMI_MATCH_VENDOR 0 /* Just match on vendor */ 122 #define CMI_MATCH_FAMILY 1 /* Match down to family */ 123 #define CMI_MATCH_MODEL 2 /* Match down to model */ 124 #define CMI_MATCH_STEPPING 3 /* Match down to stepping */ 125 126 static void 127 cmi_link(cmi_t *cmi) 128 { 129 ASSERT(MUTEX_HELD(&cmi_load_lock)); 130 131 cmi->cmi_prev = NULL; 132 cmi->cmi_next = cmi_list; 133 if (cmi_list != NULL) 134 cmi_list->cmi_prev = cmi; 135 cmi_list = cmi; 136 } 137 138 static void 139 cmi_unlink(cmi_t *cmi) 140 { 141 ASSERT(MUTEX_HELD(&cmi_load_lock)); 142 ASSERT(cmi->cmi_refcnt == 0); 143 144 if (cmi->cmi_prev != NULL) 145 cmi->cmi_prev = cmi->cmi_next; 146 147 if (cmi->cmi_next != NULL) 148 cmi->cmi_next->cmi_prev = cmi->cmi_prev; 149 150 if (cmi_list == cmi) 151 cmi_list = cmi->cmi_next; 152 } 153 154 /* 155 * Hold the module in memory. We call to CPU modules without using the 156 * stubs mechanism, so these modules must be manually held in memory. 157 * The mod_ref acts as if another loaded module has a dependency on us. 158 */ 159 static void 160 cmi_hold(cmi_t *cmi) 161 { 162 ASSERT(MUTEX_HELD(&cmi_load_lock)); 163 164 mutex_enter(&mod_lock); 165 cmi->cmi_modp->mod_ref++; 166 mutex_exit(&mod_lock); 167 cmi->cmi_refcnt++; 168 } 169 170 static void 171 cmi_rele(cmi_t *cmi) 172 { 173 ASSERT(MUTEX_HELD(&cmi_load_lock)); 174 175 mutex_enter(&mod_lock); 176 cmi->cmi_modp->mod_ref--; 177 mutex_exit(&mod_lock); 178 179 if (--cmi->cmi_refcnt == 0) { 180 cmi_unlink(cmi); 181 kmem_free(cmi, sizeof (cmi_t)); 182 } 183 } 184 185 static cmi_ops_t * 186 cmi_getops(modctl_t *modp) 187 { 188 cmi_ops_t *ops; 189 190 if ((ops = (cmi_ops_t *)modlookup_by_modctl(modp, "_cmi_ops")) == 191 NULL) { 192 cmn_err(CE_WARN, "cpu module '%s' is invalid: no _cmi_ops " 193 "found", modp->mod_modname); 194 return (NULL); 195 } 196 197 if (ops->cmi_init == NULL) { 198 cmn_err(CE_WARN, "cpu module '%s' is invalid: no cmi_init " 199 "entry point", modp->mod_modname); 200 return (NULL); 201 } 202 203 return (ops); 204 } 205 206 static cmi_t * 207 cmi_load_modctl(modctl_t *modp) 208 { 209 cmi_ops_t *ops; 210 uintptr_t ver; 211 cmi_t *cmi; 212 cmi_api_ver_t apiver; 213 214 ASSERT(MUTEX_HELD(&cmi_load_lock)); 215 216 for (cmi = cmi_list; cmi != NULL; cmi = cmi->cmi_next) { 217 if (cmi->cmi_modp == modp) 218 return (cmi); 219 } 220 221 if ((ver = modlookup_by_modctl(modp, "_cmi_api_version")) == NULL) { 222 /* 223 * Apparently a cpu module before versioning was introduced - 224 * we call this version 0. 225 */ 226 apiver = CMI_API_VERSION_0; 227 } else { 228 apiver = *((cmi_api_ver_t *)ver); 229 if (!CMI_API_VERSION_CHKMAGIC(apiver)) { 230 cmn_err(CE_WARN, "cpu module '%s' is invalid: " 231 "_cmi_api_version 0x%x has bad magic", 232 modp->mod_modname, apiver); 233 return (NULL); 234 } 235 } 236 237 if (apiver != CMI_API_VERSION) { 238 cmn_err(CE_WARN, "cpu module '%s' has API version %d, " 239 "kernel requires API version %d", modp->mod_modname, 240 CMI_API_VERSION_TOPRINT(apiver), 241 CMI_API_VERSION_TOPRINT(CMI_API_VERSION)); 242 return (NULL); 243 } 244 245 if ((ops = cmi_getops(modp)) == NULL) 246 return (NULL); 247 248 cmi = kmem_zalloc(sizeof (*cmi), KM_SLEEP); 249 cmi->cmi_ops = ops; 250 cmi->cmi_modp = modp; 251 252 cmi_link(cmi); 253 254 return (cmi); 255 } 256 257 static int 258 cmi_cpu_match(cmi_hdl_t hdl1, cmi_hdl_t hdl2, int match) 259 { 260 if (match >= CMI_MATCH_VENDOR && 261 cmi_hdl_vendor(hdl1) != cmi_hdl_vendor(hdl2)) 262 return (0); 263 264 if (match >= CMI_MATCH_FAMILY && 265 cmi_hdl_family(hdl1) != cmi_hdl_family(hdl2)) 266 return (0); 267 268 if (match >= CMI_MATCH_MODEL && 269 cmi_hdl_model(hdl1) != cmi_hdl_model(hdl2)) 270 return (0); 271 272 if (match >= CMI_MATCH_STEPPING && 273 cmi_hdl_stepping(hdl1) != cmi_hdl_stepping(hdl2)) 274 return (0); 275 276 return (1); 277 } 278 279 static int 280 cmi_search_list_cb(cmi_hdl_t whdl, void *arg1, void *arg2, void *arg3) 281 { 282 cmi_hdl_t thdl = (cmi_hdl_t)arg1; 283 int match = *((int *)arg2); 284 cmi_hdl_t *rsltp = (cmi_hdl_t *)arg3; 285 286 if (cmi_cpu_match(thdl, whdl, match)) { 287 cmi_hdl_hold(whdl); /* short-term hold */ 288 *rsltp = whdl; 289 return (CMI_HDL_WALK_DONE); 290 } else { 291 return (CMI_HDL_WALK_NEXT); 292 } 293 } 294 295 static cmi_t * 296 cmi_search_list(cmi_hdl_t hdl, int match) 297 { 298 cmi_hdl_t dhdl = NULL; 299 cmi_t *cmi = NULL; 300 301 ASSERT(MUTEX_HELD(&cmi_load_lock)); 302 303 cmi_hdl_walk(cmi_search_list_cb, (void *)hdl, (void *)&match, &dhdl); 304 if (dhdl) { 305 cmi = HDL2CMI(dhdl); 306 cmi_hdl_rele(dhdl); /* held in cmi_search_list_cb */ 307 } 308 309 return (cmi); 310 } 311 312 static cmi_t * 313 cmi_load_module(cmi_hdl_t hdl, int match, int *chosenp) 314 { 315 modctl_t *modp; 316 cmi_t *cmi; 317 int modid; 318 uint_t s[3]; 319 320 ASSERT(MUTEX_HELD(&cmi_load_lock)); 321 ASSERT(match == CMI_MATCH_STEPPING || match == CMI_MATCH_MODEL || 322 match == CMI_MATCH_FAMILY || match == CMI_MATCH_VENDOR); 323 324 /* 325 * Have we already loaded a module for a cpu with the same 326 * vendor/family/model/stepping? 327 */ 328 if ((cmi = cmi_search_list(hdl, match)) != NULL) { 329 cmi_hold(cmi); 330 return (cmi); 331 } 332 333 s[0] = cmi_hdl_family(hdl); 334 s[1] = cmi_hdl_model(hdl); 335 s[2] = cmi_hdl_stepping(hdl); 336 modid = modload_qualified(CPUMOD_SUBDIR, CPUMOD_PREFIX, 337 cmi_hdl_vendorstr(hdl), ".", s, match, chosenp); 338 339 if (modid == -1) 340 return (NULL); 341 342 modp = mod_hold_by_id(modid); 343 cmi = cmi_load_modctl(modp); 344 if (cmi) 345 cmi_hold(cmi); 346 mod_release_mod(modp); 347 348 return (cmi); 349 } 350 351 /* 352 * Try to load a cpu module with specific support for this chip type. 353 */ 354 static cmi_t * 355 cmi_load_specific(cmi_hdl_t hdl, void **datap) 356 { 357 cmi_t *cmi; 358 int err; 359 int i; 360 361 ASSERT(MUTEX_HELD(&cmi_load_lock)); 362 363 for (i = CMI_MATCH_STEPPING; i >= CMI_MATCH_VENDOR; i--) { 364 int suffixlevel; 365 366 if ((cmi = cmi_load_module(hdl, i, &suffixlevel)) == NULL) 367 return (NULL); 368 369 /* 370 * A module has loaded and has a _cmi_ops structure, and the 371 * module has been held for this instance. Call its cmi_init 372 * entry point - we expect success (0) or ENOTSUP. 373 */ 374 if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) == 0) { 375 if (boothowto & RB_VERBOSE) { 376 printf("initialized cpu module '%s' on " 377 "chip %d core %d strand %d\n", 378 cmi->cmi_modp->mod_modname, 379 cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl), 380 cmi_hdl_strandid(hdl)); 381 } 382 return (cmi); 383 } else if (err != ENOTSUP) { 384 cmn_err(CE_WARN, "failed to init cpu module '%s' on " 385 "chip %d core %d strand %d: err=%d\n", 386 cmi->cmi_modp->mod_modname, 387 cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl), 388 cmi_hdl_strandid(hdl), err); 389 } 390 391 /* 392 * The module failed or declined to init, so release 393 * it and update i to be equal to the number 394 * of suffices actually used in the last module path. 395 */ 396 cmi_rele(cmi); 397 i = suffixlevel; 398 } 399 400 return (NULL); 401 } 402 403 /* 404 * Load the generic IA32 MCA cpu module, which may still supplement 405 * itself with model-specific support through cpu model-specific modules. 406 */ 407 static cmi_t * 408 cmi_load_generic(cmi_hdl_t hdl, void **datap) 409 { 410 modctl_t *modp; 411 cmi_t *cmi; 412 int modid; 413 int err; 414 415 ASSERT(MUTEX_HELD(&cmi_load_lock)); 416 417 if ((modid = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic")) == -1) 418 return (NULL); 419 420 modp = mod_hold_by_id(modid); 421 cmi = cmi_load_modctl(modp); 422 if (cmi) 423 cmi_hold(cmi); 424 mod_release_mod(modp); 425 426 if (cmi == NULL) 427 return (NULL); 428 429 if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) != 0) { 430 if (err != ENOTSUP) 431 cmn_err(CE_WARN, CPUMOD_PREFIX ".generic failed to " 432 "init: err=%d", err); 433 cmi_rele(cmi); 434 return (NULL); 435 } 436 437 return (cmi); 438 } 439 440 cmi_hdl_t 441 cmi_init(enum cmi_hdl_class class, uint_t chipid, uint_t coreid, 442 uint_t strandid) 443 { 444 cmi_t *cmi = NULL; 445 cmi_hdl_t hdl; 446 void *data; 447 448 if (cmi_no_init) { 449 cmi_no_mca_init = 1; 450 return (NULL); 451 } 452 453 mutex_enter(&cmi_load_lock); 454 455 if ((hdl = cmi_hdl_create(class, chipid, coreid, strandid)) == NULL) { 456 mutex_exit(&cmi_load_lock); 457 cmn_err(CE_WARN, "There will be no MCA support on chip %d " 458 "core %d strand %d (cmi_hdl_create returned NULL)\n", 459 chipid, coreid, strandid); 460 return (NULL); 461 } 462 463 if (!cmi_force_generic) 464 cmi = cmi_load_specific(hdl, &data); 465 466 if (cmi == NULL && (cmi = cmi_load_generic(hdl, &data)) == NULL) { 467 cmn_err(CE_WARN, "There will be no MCA support on chip %d " 468 "core %d strand %d\n", chipid, coreid, strandid); 469 cmi_hdl_rele(hdl); 470 mutex_exit(&cmi_load_lock); 471 return (NULL); 472 } 473 474 cmi_hdl_setcmi(hdl, cmi, data); 475 476 cms_init(hdl); 477 478 mutex_exit(&cmi_load_lock); 479 480 return (hdl); 481 } 482 483 /* 484 * cmi_fini is not called at the moment. It is intended to be called 485 * on DR deconfigure of a cpu resource. It should not be called at 486 * simple offline of a cpu. 487 */ 488 void 489 cmi_fini(cmi_hdl_t hdl) 490 { 491 cmi_t *cmi = HDL2CMI(hdl); 492 493 if (cms_present(hdl)) 494 cms_fini(hdl); 495 496 if (CMI_OP_PRESENT(cmi, cmi_fini)) 497 CMI_OPS(cmi)->cmi_fini(hdl); 498 499 cmi_hdl_rele(hdl); /* release hold obtained in cmi_hdl_create */ 500 } 501 502 /* 503 * cmi_post_startup is called from post_startup for the boot cpu only (no 504 * other cpus are started yet). 505 */ 506 void 507 cmi_post_startup(void) 508 { 509 cmi_hdl_t hdl; 510 cmi_t *cmi; 511 512 if (cmi_no_mca_init != 0 || 513 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 514 return; 515 516 cmi = HDL2CMI(hdl); 517 518 if (CMI_OP_PRESENT(cmi, cmi_post_startup)) 519 CMI_OPS(cmi)->cmi_post_startup(hdl); 520 521 cmi_hdl_rele(hdl); 522 } 523 524 /* 525 * Called just once from start_other_cpus when all processors are started. 526 * This will not be called for each cpu, so the registered op must not 527 * assume it is called as such. We are not necessarily executing on 528 * the boot cpu. 529 */ 530 void 531 cmi_post_mpstartup(void) 532 { 533 cmi_hdl_t hdl; 534 cmi_t *cmi; 535 536 if (cmi_no_mca_init != 0 || 537 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 538 return; 539 540 cmi = HDL2CMI(hdl); 541 542 if (CMI_OP_PRESENT(cmi, cmi_post_mpstartup)) 543 CMI_OPS(cmi)->cmi_post_mpstartup(hdl); 544 545 cmi_hdl_rele(hdl); 546 } 547 548 void 549 cmi_faulted_enter(cmi_hdl_t hdl) 550 { 551 cmi_t *cmi = HDL2CMI(hdl); 552 553 if (cmi_no_mca_init != 0) 554 return; 555 556 if (CMI_OP_PRESENT(cmi, cmi_faulted_enter)) 557 CMI_OPS(cmi)->cmi_faulted_enter(hdl); 558 } 559 560 void 561 cmi_faulted_exit(cmi_hdl_t hdl) 562 { 563 cmi_t *cmi = HDL2CMI(hdl); 564 565 if (cmi_no_mca_init != 0) 566 return; 567 568 if (CMI_OP_PRESENT(cmi, cmi_faulted_exit)) 569 CMI_OPS(cmi)->cmi_faulted_exit(hdl); 570 } 571 572 void 573 cmi_mca_init(cmi_hdl_t hdl) 574 { 575 cmi_t *cmi; 576 577 if (cmi_no_mca_init != 0) 578 return; 579 580 cmi = HDL2CMI(hdl); 581 582 if (CMI_OP_PRESENT(cmi, cmi_mca_init)) 583 CMI_OPS(cmi)->cmi_mca_init(hdl); 584 } 585 586 #define CMI_RESPONSE_PANIC 0x0 /* panic must have value 0 */ 587 #define CMI_RESPONSE_NONE 0x1 588 #define CMI_RESPONSE_CKILL 0x2 589 #define CMI_RESPONSE_REBOOT 0x3 /* not implemented */ 590 #define CMI_RESPONSE_ONTRAP_PROT 0x4 591 #define CMI_RESPONSE_LOFAULT_PROT 0x5 592 593 /* 594 * Return 0 if we will panic in response to this machine check, otherwise 595 * non-zero. If the caller is cmi_mca_trap in this file then the nonzero 596 * return values are to be interpreted from CMI_RESPONSE_* above. 597 * 598 * This function must just return what will be done without actually 599 * doing anything; this includes not changing the regs. 600 */ 601 int 602 cmi_mce_response(struct regs *rp, uint64_t disp) 603 { 604 int panicrsp = cmi_panic_on_uncorrectable_error ? CMI_RESPONSE_PANIC : 605 CMI_RESPONSE_NONE; 606 on_trap_data_t *otp; 607 608 ASSERT(rp != NULL); /* don't call for polling, only on #MC */ 609 610 /* 611 * If no bits are set in the disposition then there is nothing to 612 * worry about and we do not need to trampoline to ontrap or 613 * lofault handlers. 614 */ 615 if (disp == 0) 616 return (CMI_RESPONSE_NONE); 617 618 /* 619 * Unconstrained errors cannot be forgiven, even by ontrap or 620 * lofault protection. The data is not poisoned and may not 621 * even belong to the trapped context - eg a writeback of 622 * data that is found to be bad. 623 */ 624 if (disp & CMI_ERRDISP_UC_UNCONSTRAINED) 625 return (panicrsp); 626 627 /* 628 * ontrap OT_DATA_EC and lofault protection forgive any disposition 629 * other than unconstrained, even those normally forced fatal. 630 */ 631 if ((otp = curthread->t_ontrap) != NULL && otp->ot_prot & OT_DATA_EC) 632 return (CMI_RESPONSE_ONTRAP_PROT); 633 else if (curthread->t_lofault) 634 return (CMI_RESPONSE_LOFAULT_PROT); 635 636 /* 637 * Forced-fatal errors are terminal even in user mode. 638 */ 639 if (disp & CMI_ERRDISP_FORCEFATAL) 640 return (panicrsp); 641 642 /* 643 * If the trapped context is corrupt or we have no instruction pointer 644 * to resume at (and aren't trampolining to a fault handler) 645 * then in the kernel case we must panic and in usermode we 646 * kill the affected contract. 647 */ 648 if (disp & (CMI_ERRDISP_CURCTXBAD | CMI_ERRDISP_RIPV_INVALID)) 649 return (USERMODE(rp->r_cs) ? CMI_RESPONSE_CKILL : panicrsp); 650 651 /* 652 * Anything else is harmless 653 */ 654 return (CMI_RESPONSE_NONE); 655 } 656 657 int cma_mca_trap_panic_suppressed = 0; 658 659 static void 660 cmi_mca_panic(void) 661 { 662 if (cmi_panic_on_uncorrectable_error) { 663 fm_panic("Unrecoverable Machine-Check Exception"); 664 } else { 665 cmn_err(CE_WARN, "suppressing panic from fatal #mc"); 666 cma_mca_trap_panic_suppressed++; 667 } 668 } 669 670 671 int cma_mca_trap_contract_kills = 0; 672 int cma_mca_trap_ontrap_forgiven = 0; 673 int cma_mca_trap_lofault_forgiven = 0; 674 675 /* 676 * Native #MC handler - we branch to here from mcetrap 677 */ 678 /*ARGSUSED*/ 679 void 680 cmi_mca_trap(struct regs *rp) 681 { 682 #ifndef __xpv 683 cmi_hdl_t hdl = NULL; 684 uint64_t disp; 685 cmi_t *cmi; 686 int s; 687 688 if (cmi_no_mca_init != 0) 689 return; 690 691 /* 692 * This function can call cmn_err, and the cpu module cmi_mca_trap 693 * entry point may also elect to call cmn_err (e.g., if it can't 694 * log the error onto an errorq, say very early in boot). 695 * We need to let cprintf know that we must not block. 696 */ 697 s = spl8(); 698 699 if ((hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU), 700 cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) == NULL || 701 (cmi = HDL2CMI(hdl)) == NULL || 702 !CMI_OP_PRESENT(cmi, cmi_mca_trap)) { 703 704 cmn_err(CE_WARN, "#MC exception on cpuid %d: %s", 705 CPU->cpu_id, 706 hdl ? "handle lookup ok but no #MC handler found" : 707 "handle lookup failed"); 708 709 if (hdl != NULL) 710 cmi_hdl_rele(hdl); 711 712 splx(s); 713 return; 714 } 715 716 disp = CMI_OPS(cmi)->cmi_mca_trap(hdl, rp); 717 718 switch (cmi_mce_response(rp, disp)) { 719 default: 720 cmn_err(CE_WARN, "Invalid response from cmi_mce_response"); 721 /*FALLTHRU*/ 722 723 case CMI_RESPONSE_PANIC: 724 cmi_mca_panic(); 725 break; 726 727 case CMI_RESPONSE_NONE: 728 break; 729 730 case CMI_RESPONSE_CKILL: 731 ttolwp(curthread)->lwp_pcb.pcb_flags |= ASYNC_HWERR; 732 aston(curthread); 733 cma_mca_trap_contract_kills++; 734 break; 735 736 case CMI_RESPONSE_ONTRAP_PROT: { 737 on_trap_data_t *otp = curthread->t_ontrap; 738 otp->ot_trap = OT_DATA_EC; 739 rp->r_pc = otp->ot_trampoline; 740 cma_mca_trap_ontrap_forgiven++; 741 break; 742 } 743 744 case CMI_RESPONSE_LOFAULT_PROT: 745 rp->r_r0 = EFAULT; 746 rp->r_pc = curthread->t_lofault; 747 cma_mca_trap_lofault_forgiven++; 748 break; 749 } 750 751 cmi_hdl_rele(hdl); 752 splx(s); 753 #endif /* __xpv */ 754 } 755 756 void 757 cmi_hdl_poke(cmi_hdl_t hdl) 758 { 759 cmi_t *cmi = HDL2CMI(hdl); 760 761 if (!CMI_OP_PRESENT(cmi, cmi_hdl_poke)) 762 return; 763 764 CMI_OPS(cmi)->cmi_hdl_poke(hdl); 765 } 766 767 #ifndef __xpv 768 void 769 cmi_cmci_trap() 770 { 771 cmi_hdl_t hdl = NULL; 772 cmi_t *cmi; 773 774 if (cmi_no_mca_init != 0) 775 return; 776 777 if ((hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU), 778 cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) == NULL || 779 (cmi = HDL2CMI(hdl)) == NULL || 780 !CMI_OP_PRESENT(cmi, cmi_cmci_trap)) { 781 782 cmn_err(CE_WARN, "CMCI interrupt on cpuid %d: %s", 783 CPU->cpu_id, 784 hdl ? "handle lookup ok but no CMCI handler found" : 785 "handle lookup failed"); 786 787 if (hdl != NULL) 788 cmi_hdl_rele(hdl); 789 790 return; 791 } 792 793 CMI_OPS(cmi)->cmi_cmci_trap(hdl); 794 795 cmi_hdl_rele(hdl); 796 } 797 #endif /* __xpv */ 798 799 void 800 cmi_mc_register(cmi_hdl_t hdl, const cmi_mc_ops_t *mcops, void *mcdata) 801 { 802 if (!cmi_no_mca_init) 803 cmi_hdl_setmc(hdl, mcops, mcdata); 804 } 805 806 void 807 cmi_mc_sw_memscrub_disable(void) 808 { 809 memscrub_disable(); 810 } 811 812 cmi_errno_t 813 cmi_mc_patounum(uint64_t pa, uint8_t valid_hi, uint8_t valid_lo, uint32_t synd, 814 int syndtype, mc_unum_t *up) 815 { 816 const struct cmi_mc_ops *mcops; 817 cmi_hdl_t hdl; 818 cmi_errno_t rv; 819 820 if (cmi_no_mca_init || 821 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 822 return (CMIERR_MC_ABSENT); 823 824 if ((mcops = cmi_hdl_getmcops(hdl)) == NULL || 825 mcops->cmi_mc_patounum == NULL) { 826 cmi_hdl_rele(hdl); 827 return (CMIERR_MC_NOTSUP); 828 } 829 830 rv = mcops->cmi_mc_patounum(cmi_hdl_getmcdata(hdl), pa, valid_hi, 831 valid_lo, synd, syndtype, up); 832 833 cmi_hdl_rele(hdl); 834 835 return (rv); 836 } 837 838 cmi_errno_t 839 cmi_mc_unumtopa(mc_unum_t *up, nvlist_t *nvl, uint64_t *pap) 840 { 841 const struct cmi_mc_ops *mcops; 842 cmi_hdl_t hdl; 843 cmi_errno_t rv; 844 nvlist_t *hcsp; 845 846 if (up != NULL && nvl != NULL) 847 return (CMIERR_API); /* convert from just one form */ 848 849 if (cmi_no_mca_init || 850 (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 851 return (CMIERR_MC_ABSENT); 852 853 if ((mcops = cmi_hdl_getmcops(hdl)) == NULL || 854 mcops->cmi_mc_unumtopa == NULL) { 855 cmi_hdl_rele(hdl); 856 857 if (nvl != NULL && nvlist_lookup_nvlist(nvl, 858 FM_FMRI_HC_SPECIFIC, &hcsp) == 0 && 859 (nvlist_lookup_uint64(hcsp, 860 "asru-" FM_FMRI_HC_SPECIFIC_PHYSADDR, pap) == 0 || 861 nvlist_lookup_uint64(hcsp, FM_FMRI_HC_SPECIFIC_PHYSADDR, 862 pap) == 0)) { 863 return (CMIERR_MC_PARTIALUNUMTOPA); 864 } else { 865 return (mcops && mcops->cmi_mc_unumtopa ? 866 CMIERR_MC_NOTSUP : CMIERR_MC_ABSENT); 867 } 868 } 869 870 rv = mcops->cmi_mc_unumtopa(cmi_hdl_getmcdata(hdl), up, nvl, pap); 871 872 cmi_hdl_rele(hdl); 873 874 return (rv); 875 } 876 877 void 878 cmi_mc_logout(cmi_hdl_t hdl, boolean_t ismc, boolean_t sync) 879 { 880 const struct cmi_mc_ops *mcops; 881 882 if (cmi_no_mca_init || (mcops = cmi_hdl_getmcops(hdl)) == NULL) 883 return; 884 885 if (mcops->cmi_mc_logout != NULL) 886 mcops->cmi_mc_logout(hdl, ismc, sync); 887 } 888 889 cmi_errno_t 890 cmi_hdl_msrinject(cmi_hdl_t hdl, cmi_mca_regs_t *regs, uint_t nregs, 891 int force) 892 { 893 cmi_t *cmi = cmi_hdl_getcmi(hdl); 894 cmi_errno_t rc; 895 896 if (!CMI_OP_PRESENT(cmi, cmi_msrinject)) 897 return (CMIERR_NOTSUP); 898 899 cmi_hdl_inj_begin(hdl); 900 rc = CMI_OPS(cmi)->cmi_msrinject(hdl, regs, nregs, force); 901 cmi_hdl_inj_end(hdl); 902 903 return (rc); 904 } 905 906 boolean_t 907 cmi_panic_on_ue(void) 908 { 909 return (cmi_panic_on_uncorrectable_error ? B_TRUE : B_FALSE); 910 } 911 912 void 913 cmi_panic_callback(void) 914 { 915 cmi_hdl_t hdl; 916 cmi_t *cmi; 917 918 if (cmi_no_mca_init || (hdl = cmi_hdl_any()) == NULL) 919 return; 920 921 cmi = cmi_hdl_getcmi(hdl); 922 if (CMI_OP_PRESENT(cmi, cmi_panic_callback)) 923 CMI_OPS(cmi)->cmi_panic_callback(); 924 925 cmi_hdl_rele(hdl); 926 } 927