/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Public interface to routines implemented by CPU modules
 */

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/x86_archext.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpu_module_ms.h>
#include <sys/fm/util.h>
#include <sys/reboot.h>
#include <sys/modctl.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/fm/protocol.h>
#include <sys/pcb.h>
#include <sys/ontrap.h>
#include <sys/psw.h>
#include <sys/privregs.h>

/*
 * Set to force cmi_init to fail.
 */
int cmi_no_init = 0;

/*
 * Set to avoid MCA initialization.
 */
int cmi_no_mca_init = 0;

/*
 * If cleared for debugging we will not attempt to load a model-specific
 * cpu module but will load the generic cpu module instead.
 */
int cmi_force_generic = 0;

/*
 * If cleared for debugging, we will suppress panicking on fatal hardware
 * errors.  This should *only* be used for debugging; its use can and will
 * cause data corruption if actual hardware errors are detected by the system.
 */
int cmi_panic_on_uncorrectable_error = 1;

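/*
 * The four debugging variables above are plain kernel globals; for debugging
 * they can typically be set from /etc/system (for example
 * "set cmi_no_mca_init = 1") or patched at runtime with a kernel debugger.
 */
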
/*
 * Subdirectory (relative to the module search path) in which we will
 * look for cpu modules.
 */
#define CPUMOD_SUBDIR   "cpu"

/*
 * CPU modules have filenames such as "cpu.AuthenticAMD.15" and
 * "cpu.generic" - the "cpu" prefix is specified by the following.
 */
#define CPUMOD_PREFIX   "cpu"

/*
 * Structure used to keep track of cpu modules we have loaded and their ops
 */
typedef struct cmi {
        struct cmi *cmi_next;
        struct cmi *cmi_prev;
        const cmi_ops_t *cmi_ops;
        struct modctl *cmi_modp;
        uint_t cmi_refcnt;
} cmi_t;

static cmi_t *cmi_list;
static kmutex_t cmi_load_lock;

/*
 * Functions we need from cmi_hw.c that are not part of the cpu_module.h
 * interface.
 */
extern cmi_hdl_t cmi_hdl_create(enum cmi_hdl_class, uint_t, uint_t, uint_t);
extern void cmi_hdl_setcmi(cmi_hdl_t, void *, void *);
extern void *cmi_hdl_getcmi(cmi_hdl_t);
extern void cmi_hdl_setmc(cmi_hdl_t, const struct cmi_mc_ops *, void *);

#define HDL2CMI(hdl)            cmi_hdl_getcmi(hdl)

#define CMI_OPS(cmi)            (cmi)->cmi_ops
#define CMI_OP_PRESENT(cmi, op) ((cmi) && CMI_OPS(cmi)->op != NULL)

#define CMI_MATCH_VENDOR        0       /* Just match on vendor */
#define CMI_MATCH_FAMILY        1       /* Match down to family */
#define CMI_MATCH_MODEL         2       /* Match down to model */
#define CMI_MATCH_STEPPING      3       /* Match down to stepping */

static void
cmi_link(cmi_t *cmi)
{
        ASSERT(MUTEX_HELD(&cmi_load_lock));

        cmi->cmi_prev = NULL;
        cmi->cmi_next = cmi_list;
        if (cmi_list != NULL)
                cmi_list->cmi_prev = cmi;
        cmi_list = cmi;
}

static void
cmi_unlink(cmi_t *cmi)
{
        ASSERT(MUTEX_HELD(&cmi_load_lock));
        ASSERT(cmi->cmi_refcnt == 0);

        if (cmi->cmi_prev != NULL)
                cmi->cmi_prev->cmi_next = cmi->cmi_next;

        if (cmi->cmi_next != NULL)
                cmi->cmi_next->cmi_prev = cmi->cmi_prev;

        if (cmi_list == cmi)
                cmi_list = cmi->cmi_next;
}

/*
 * Hold the module in memory.  We call to CPU modules without using the
 * stubs mechanism, so these modules must be manually held in memory.
 * The mod_ref acts as if another loaded module has a dependency on us.
 */
static void
cmi_hold(cmi_t *cmi)
{
        ASSERT(MUTEX_HELD(&cmi_load_lock));

        mutex_enter(&mod_lock);
        cmi->cmi_modp->mod_ref++;
        mutex_exit(&mod_lock);
        cmi->cmi_refcnt++;
}

static void
cmi_rele(cmi_t *cmi)
{
        ASSERT(MUTEX_HELD(&cmi_load_lock));

        mutex_enter(&mod_lock);
        cmi->cmi_modp->mod_ref--;
        mutex_exit(&mod_lock);

        if (--cmi->cmi_refcnt == 0) {
                cmi_unlink(cmi);
                kmem_free(cmi, sizeof (cmi_t));
        }
}

static cmi_ops_t *
cmi_getops(modctl_t *modp)
{
        cmi_ops_t *ops;

        if ((ops = (cmi_ops_t *)modlookup_by_modctl(modp, "_cmi_ops")) ==
            NULL) {
                cmn_err(CE_WARN, "cpu module '%s' is invalid: no _cmi_ops "
                    "found", modp->mod_modname);
                return (NULL);
        }

        if (ops->cmi_init == NULL) {
                cmn_err(CE_WARN, "cpu module '%s' is invalid: no cmi_init "
                    "entry point", modp->mod_modname);
                return (NULL);
        }

        return (ops);
}

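/*
 * Look up (or create) the cmi_t for a module we have just loaded or held.
 * One cmi_t is cached on cmi_list per modctl; for a new module we check
 * its _cmi_api_version (if present) and its _cmi_ops before accepting it.
 */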
static cmi_t *
cmi_load_modctl(modctl_t *modp)
{
        cmi_ops_t *ops;
        uintptr_t ver;
        cmi_t *cmi;
        cmi_api_ver_t apiver;

        ASSERT(MUTEX_HELD(&cmi_load_lock));

        for (cmi = cmi_list; cmi != NULL; cmi = cmi->cmi_next) {
                if (cmi->cmi_modp == modp)
                        return (cmi);
        }

        if ((ver = modlookup_by_modctl(modp, "_cmi_api_version")) == NULL) {
                /*
                 * Apparently a cpu module from before versioning was
                 * introduced - we call this version 0.
                 */
                apiver = CMI_API_VERSION_0;
        } else {
                apiver = *((cmi_api_ver_t *)ver);
                if (!CMI_API_VERSION_CHKMAGIC(apiver)) {
                        cmn_err(CE_WARN, "cpu module '%s' is invalid: "
                            "_cmi_api_version 0x%x has bad magic",
                            modp->mod_modname, apiver);
                        return (NULL);
                }
        }

        if (apiver != CMI_API_VERSION) {
                cmn_err(CE_WARN, "cpu module '%s' has API version %d, "
                    "kernel requires API version %d", modp->mod_modname,
                    CMI_API_VERSION_TOPRINT(apiver),
                    CMI_API_VERSION_TOPRINT(CMI_API_VERSION));
                return (NULL);
        }

        if ((ops = cmi_getops(modp)) == NULL)
                return (NULL);

        cmi = kmem_zalloc(sizeof (*cmi), KM_SLEEP);
        cmi->cmi_ops = ops;
        cmi->cmi_modp = modp;

        cmi_link(cmi);

        return (cmi);
}

static int
cmi_cpu_match(cmi_hdl_t hdl1, cmi_hdl_t hdl2, int match)
{
        if (match >= CMI_MATCH_VENDOR &&
            cmi_hdl_vendor(hdl1) != cmi_hdl_vendor(hdl2))
                return (0);

        if (match >= CMI_MATCH_FAMILY &&
            cmi_hdl_family(hdl1) != cmi_hdl_family(hdl2))
                return (0);

        if (match >= CMI_MATCH_MODEL &&
            cmi_hdl_model(hdl1) != cmi_hdl_model(hdl2))
                return (0);

        if (match >= CMI_MATCH_STEPPING &&
            cmi_hdl_stepping(hdl1) != cmi_hdl_stepping(hdl2))
                return (0);

        return (1);
}

static int
cmi_search_list_cb(cmi_hdl_t whdl, void *arg1, void *arg2, void *arg3)
{
        cmi_hdl_t thdl = (cmi_hdl_t)arg1;
        int match = *((int *)arg2);
        cmi_hdl_t *rsltp = (cmi_hdl_t *)arg3;

        if (cmi_cpu_match(thdl, whdl, match)) {
                cmi_hdl_hold(whdl);     /* short-term hold */
                *rsltp = whdl;
                return (CMI_HDL_WALK_DONE);
        } else {
                return (CMI_HDL_WALK_NEXT);
        }
}

static cmi_t *
cmi_search_list(cmi_hdl_t hdl, int match)
{
        cmi_hdl_t dhdl = NULL;
        cmi_t *cmi = NULL;

        ASSERT(MUTEX_HELD(&cmi_load_lock));

        cmi_hdl_walk(cmi_search_list_cb, (void *)hdl, (void *)&match, &dhdl);
        if (dhdl) {
                cmi = HDL2CMI(dhdl);
                cmi_hdl_rele(dhdl);     /* held in cmi_search_list_cb */
        }

        return (cmi);
}

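/*
 * Example of the name search (assumed, for illustration): for an
 * "AuthenticAMD" family 0xf, model 0x2, stepping 0x1 cpu, a
 * CMI_MATCH_STEPPING request asks modload_qualified() for the most
 * specific module it can find, starting from
 *
 *      cpu/cpu.AuthenticAMD.15.2.1
 *
 * and falling back through model, family and vendor-only names;
 * *chosenp reports how many numeric suffixes were actually used.
 */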
static cmi_t *
cmi_load_module(cmi_hdl_t hdl, int match, int *chosenp)
{
        modctl_t *modp;
        cmi_t *cmi;
        int modid;
        uint_t s[3];

        ASSERT(MUTEX_HELD(&cmi_load_lock));
        ASSERT(match == CMI_MATCH_STEPPING || match == CMI_MATCH_MODEL ||
            match == CMI_MATCH_FAMILY || match == CMI_MATCH_VENDOR);

        /*
         * Have we already loaded a module for a cpu with the same
         * vendor/family/model/stepping?
         */
        if ((cmi = cmi_search_list(hdl, match)) != NULL) {
                cmi_hold(cmi);
                return (cmi);
        }

        s[0] = cmi_hdl_family(hdl);
        s[1] = cmi_hdl_model(hdl);
        s[2] = cmi_hdl_stepping(hdl);
        modid = modload_qualified(CPUMOD_SUBDIR, CPUMOD_PREFIX,
            cmi_hdl_vendorstr(hdl), ".", s, match, chosenp);

        if (modid == -1)
                return (NULL);

        modp = mod_hold_by_id(modid);
        cmi = cmi_load_modctl(modp);
        if (cmi)
                cmi_hold(cmi);
        mod_release_mod(modp);

        return (cmi);
}

/*
 * Try to load a cpu module with specific support for this chip type.
 */
static cmi_t *
cmi_load_specific(cmi_hdl_t hdl, void **datap)
{
        cmi_t *cmi;
        int err;
        int i;

        ASSERT(MUTEX_HELD(&cmi_load_lock));

        for (i = CMI_MATCH_STEPPING; i >= CMI_MATCH_VENDOR; i--) {
                int suffixlevel;

                if ((cmi = cmi_load_module(hdl, i, &suffixlevel)) == NULL)
                        return (NULL);

                /*
                 * A module has loaded and has a _cmi_ops structure, and the
                 * module has been held for this instance.  Call its cmi_init
                 * entry point - we expect success (0) or ENOTSUP.
                 */
                if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) == 0) {
                        if (boothowto & RB_VERBOSE) {
                                printf("initialized cpu module '%s' on "
                                    "chip %d core %d strand %d\n",
                                    cmi->cmi_modp->mod_modname,
                                    cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl),
                                    cmi_hdl_strandid(hdl));
                        }
                        return (cmi);
                } else if (err != ENOTSUP) {
                        cmn_err(CE_WARN, "failed to init cpu module '%s' on "
                            "chip %d core %d strand %d: err=%d\n",
                            cmi->cmi_modp->mod_modname,
                            cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl),
                            cmi_hdl_strandid(hdl), err);
                }

                /*
                 * The module failed or declined to init, so release
                 * it and update i to be equal to the number
                 * of suffixes actually used in the last module path.
                 */
                cmi_rele(cmi);
                i = suffixlevel;
        }

        return (NULL);
}

/*
 * Load the generic IA32 MCA cpu module, which may still supplement
 * itself with model-specific support through cpu model-specific modules.
 */
static cmi_t *
cmi_load_generic(cmi_hdl_t hdl, void **datap)
{
        modctl_t *modp;
        cmi_t *cmi;
        int modid;
        int err;

        ASSERT(MUTEX_HELD(&cmi_load_lock));

        if ((modid = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic")) == -1)
                return (NULL);

        modp = mod_hold_by_id(modid);
        cmi = cmi_load_modctl(modp);
        if (cmi)
                cmi_hold(cmi);
        mod_release_mod(modp);

        if (cmi == NULL)
                return (NULL);

        if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) != 0) {
                if (err != ENOTSUP)
                        cmn_err(CE_WARN, CPUMOD_PREFIX ".generic failed to "
                            "init: err=%d", err);
                cmi_rele(cmi);
                return (NULL);
        }

        return (cmi);
}

cmi_hdl_t
cmi_init(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
        cmi_t *cmi = NULL;
        cmi_hdl_t hdl;
        void *data;

        if (cmi_no_init) {
                cmi_no_mca_init = 1;
                return (NULL);
        }

        mutex_enter(&cmi_load_lock);

        if ((hdl = cmi_hdl_create(class, chipid, coreid, strandid)) == NULL) {
                mutex_exit(&cmi_load_lock);
                cmn_err(CE_WARN, "There will be no MCA support on chip %d "
                    "core %d strand %d (cmi_hdl_create returned NULL)\n",
                    chipid, coreid, strandid);
                return (NULL);
        }

        if (!cmi_force_generic)
                cmi = cmi_load_specific(hdl, &data);

        if (cmi == NULL && (cmi = cmi_load_generic(hdl, &data)) == NULL) {
                cmn_err(CE_WARN, "There will be no MCA support on chip %d "
                    "core %d strand %d\n", chipid, coreid, strandid);
                cmi_hdl_rele(hdl);
                mutex_exit(&cmi_load_lock);
                return (NULL);
        }

        cmi_hdl_setcmi(hdl, cmi, data);

        cms_init(hdl);

        mutex_exit(&cmi_load_lock);

        return (hdl);
}

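/*
 * Sketch of expected usage (the actual caller lives in the cpu startup
 * code, not in this file): for each cpu coming online the startup path
 * does something along the lines of
 *
 *      hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
 *          cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
 *      if (hdl != NULL)
 *              cmi_mca_init(hdl);
 *
 * after which machine-check handling for that cpu is active.
 */
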
/*
 * cmi_fini is not called at the moment.  It is intended to be called
 * on DR deconfigure of a cpu resource.  It should not be called at
 * simple offline of a cpu.
 */
void
cmi_fini(cmi_hdl_t hdl)
{
        cmi_t *cmi = HDL2CMI(hdl);

        if (cms_present(hdl))
                cms_fini(hdl);

        if (CMI_OP_PRESENT(cmi, cmi_fini))
                CMI_OPS(cmi)->cmi_fini(hdl);

        cmi_hdl_rele(hdl);      /* release hold obtained in cmi_hdl_create */
}

/*
 * cmi_post_startup is called from post_startup for the boot cpu only.
 */
void
cmi_post_startup(void)
{
        cmi_hdl_t hdl;
        cmi_t *cmi;

        if (cmi_no_mca_init != 0 ||
            (hdl = cmi_hdl_any()) == NULL)      /* short-term hold */
                return;

        cmi = HDL2CMI(hdl);

        if (CMI_OP_PRESENT(cmi, cmi_post_startup))
                CMI_OPS(cmi)->cmi_post_startup(hdl);

        cmi_hdl_rele(hdl);
}

/*
 * Called just once from start_other_cpus when all processors are started.
 * This will not be called for each cpu, so the registered op must not
 * assume it is called as such.
 */
void
cmi_post_mpstartup(void)
{
        cmi_hdl_t hdl;
        cmi_t *cmi;

        if (cmi_no_mca_init != 0 ||
            (hdl = cmi_hdl_any()) == NULL)      /* short-term hold */
                return;

        cmi = HDL2CMI(hdl);

        if (CMI_OP_PRESENT(cmi, cmi_post_mpstartup))
                CMI_OPS(cmi)->cmi_post_mpstartup(hdl);

        cmi_hdl_rele(hdl);
}

void
cmi_faulted_enter(cmi_hdl_t hdl)
{
        cmi_t *cmi = HDL2CMI(hdl);

        if (cmi_no_mca_init != 0)
                return;

        if (CMI_OP_PRESENT(cmi, cmi_faulted_enter))
                CMI_OPS(cmi)->cmi_faulted_enter(hdl);
}

void
cmi_faulted_exit(cmi_hdl_t hdl)
{
        cmi_t *cmi = HDL2CMI(hdl);

        if (cmi_no_mca_init != 0)
                return;

        if (CMI_OP_PRESENT(cmi, cmi_faulted_exit))
                CMI_OPS(cmi)->cmi_faulted_exit(hdl);
}

void
cmi_mca_init(cmi_hdl_t hdl)
{
        cmi_t *cmi;

        if (cmi_no_mca_init != 0)
                return;

        cmi = HDL2CMI(hdl);

        if (CMI_OP_PRESENT(cmi, cmi_mca_init))
                CMI_OPS(cmi)->cmi_mca_init(hdl);
}

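/*
 * The remainder of the machine-check path: cmi_mca_trap() below is reached
 * from mcetrap, hands the trap to the cpu module's cmi_mca_trap op, and
 * then acts on the returned disposition as classified by cmi_mce_response().
 */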
#define CMI_RESPONSE_PANIC              0x0     /* panic must have value 0 */
#define CMI_RESPONSE_NONE               0x1
#define CMI_RESPONSE_CKILL              0x2
#define CMI_RESPONSE_REBOOT             0x3     /* not implemented */
#define CMI_RESPONSE_ONTRAP_PROT        0x4
#define CMI_RESPONSE_LOFAULT_PROT       0x5

/*
 * Return 0 if we will panic in response to this machine check, otherwise
 * non-zero.  If the caller is cmi_mca_trap in this file then the nonzero
 * return values are to be interpreted from CMI_RESPONSE_* above.
 *
 * This function must just return what will be done without actually
 * doing anything; this includes not changing the regs.
 */
int
cmi_mce_response(struct regs *rp, uint64_t disp)
{
        int panicrsp = cmi_panic_on_uncorrectable_error ? CMI_RESPONSE_PANIC :
            CMI_RESPONSE_NONE;
        on_trap_data_t *otp;

        ASSERT(rp != NULL);     /* don't call for polling, only on #MC */

        /*
         * If no bits are set in the disposition then there is nothing to
         * worry about and we do not need to trampoline to ontrap or
         * lofault handlers.
         */
        if (disp == 0)
                return (CMI_RESPONSE_NONE);

        /*
         * Unconstrained errors cannot be forgiven, even by ontrap or
         * lofault protection.  The data is not poisoned and may not
         * even belong to the trapped context - e.g., a writeback of
         * data that is found to be bad.
         */
        if (disp & CMI_ERRDISP_UC_UNCONSTRAINED)
                return (panicrsp);

        /*
         * ontrap OT_DATA_EC and lofault protection forgive any disposition
         * other than unconstrained, even those normally forced fatal.
         */
        if ((otp = curthread->t_ontrap) != NULL && otp->ot_prot & OT_DATA_EC)
                return (CMI_RESPONSE_ONTRAP_PROT);
        else if (curthread->t_lofault)
                return (CMI_RESPONSE_LOFAULT_PROT);

        /*
         * Forced-fatal errors are terminal even in user mode.
         */
        if (disp & CMI_ERRDISP_FORCEFATAL)
                return (panicrsp);

        /*
         * If the trapped context is corrupt or we have no instruction pointer
         * to resume at (and aren't trampolining to a fault handler)
         * then in the kernel case we must panic and in usermode we
         * kill the affected contract.
         */
        if (disp & (CMI_ERRDISP_CURCTXBAD | CMI_ERRDISP_RIPV_INVALID))
                return (USERMODE(rp->r_cs) ?
                    CMI_RESPONSE_CKILL : panicrsp);

        /*
         * Anything else is harmless
         */
        return (CMI_RESPONSE_NONE);
}

int cma_mca_trap_panic_suppressed = 0;

static void
cmi_mca_panic(void)
{
        if (cmi_panic_on_uncorrectable_error) {
                fm_panic("Unrecoverable Machine-Check Exception");
        } else {
                cmn_err(CE_WARN, "suppressing panic from fatal #mc");
                cma_mca_trap_panic_suppressed++;
        }
}

int cma_mca_trap_contract_kills = 0;
int cma_mca_trap_ontrap_forgiven = 0;
int cma_mca_trap_lofault_forgiven = 0;

/*
 * Native #MC handler - we branch to here from mcetrap
 */
/*ARGSUSED*/
void
cmi_mca_trap(struct regs *rp)
{
#ifndef __xpv
        cmi_hdl_t hdl = NULL;
        uint64_t disp;
        cmi_t *cmi;
        int s;

        if (cmi_no_mca_init != 0)
                return;

        /*
         * This function can call cmn_err, and the cpu module cmi_mca_trap
         * entry point may also elect to call cmn_err (e.g., if it can't
         * log the error onto an errorq, say very early in boot).
         * We need to let cprintf know that we must not block.
         */
        s = spl8();

        if ((hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU),
            cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) == NULL ||
            (cmi = HDL2CMI(hdl)) == NULL ||
            !CMI_OP_PRESENT(cmi, cmi_mca_trap)) {

                cmn_err(CE_WARN, "#MC exception on cpuid %d: %s",
                    CPU->cpu_id,
                    hdl ?
"handle lookup ok but no #MC handler found" : 697*20c794b3Sgavinm "handle lookup failed"); 698*20c794b3Sgavinm 699*20c794b3Sgavinm if (hdl != NULL) 700*20c794b3Sgavinm cmi_hdl_rele(hdl); 701*20c794b3Sgavinm 702*20c794b3Sgavinm splx(s); 703*20c794b3Sgavinm return; 7047aec1d6eScindi } 7057aec1d6eScindi 706*20c794b3Sgavinm disp = CMI_OPS(cmi)->cmi_mca_trap(hdl, rp); 7077aec1d6eScindi 708*20c794b3Sgavinm switch (cmi_mce_response(rp, disp)) { 709*20c794b3Sgavinm default: 710*20c794b3Sgavinm cmn_err(CE_WARN, "Invalid response from cmi_mce_response"); 711*20c794b3Sgavinm /*FALLTHRU*/ 7127aec1d6eScindi 713*20c794b3Sgavinm case CMI_RESPONSE_PANIC: 714*20c794b3Sgavinm cmi_mca_panic(); 715*20c794b3Sgavinm break; 716*20c794b3Sgavinm 717*20c794b3Sgavinm case CMI_RESPONSE_NONE: 718*20c794b3Sgavinm break; 719*20c794b3Sgavinm 720*20c794b3Sgavinm case CMI_RESPONSE_CKILL: 721*20c794b3Sgavinm ttolwp(curthread)->lwp_pcb.pcb_flags |= ASYNC_HWERR; 722*20c794b3Sgavinm aston(curthread); 723*20c794b3Sgavinm cma_mca_trap_contract_kills++; 724*20c794b3Sgavinm break; 725*20c794b3Sgavinm 726*20c794b3Sgavinm case CMI_RESPONSE_ONTRAP_PROT: { 727*20c794b3Sgavinm on_trap_data_t *otp = curthread->t_ontrap; 728*20c794b3Sgavinm otp->ot_trap = OT_DATA_EC; 729*20c794b3Sgavinm rp->r_pc = otp->ot_trampoline; 730*20c794b3Sgavinm cma_mca_trap_ontrap_forgiven++; 731*20c794b3Sgavinm break; 732*20c794b3Sgavinm } 733*20c794b3Sgavinm 734*20c794b3Sgavinm case CMI_RESPONSE_LOFAULT_PROT: 735*20c794b3Sgavinm rp->r_r0 = EFAULT; 736*20c794b3Sgavinm rp->r_pc = curthread->t_lofault; 737*20c794b3Sgavinm cma_mca_trap_lofault_forgiven++; 738*20c794b3Sgavinm break; 739*20c794b3Sgavinm } 740*20c794b3Sgavinm 741*20c794b3Sgavinm cmi_hdl_rele(hdl); 742*20c794b3Sgavinm splx(s); 743*20c794b3Sgavinm #endif /* __xpv */ 7447aec1d6eScindi } 7457aec1d6eScindi 7467aec1d6eScindi void 747*20c794b3Sgavinm cmi_hdl_poke(cmi_hdl_t hdl) 7487aec1d6eScindi { 749*20c794b3Sgavinm cmi_t *cmi = HDL2CMI(hdl); 750*20c794b3Sgavinm 751*20c794b3Sgavinm if (!CMI_OP_PRESENT(cmi, cmi_hdl_poke)) 752*20c794b3Sgavinm return; 753*20c794b3Sgavinm 754*20c794b3Sgavinm CMI_OPS(cmi)->cmi_hdl_poke(hdl); 7557aec1d6eScindi } 7567aec1d6eScindi 7577aec1d6eScindi void 758*20c794b3Sgavinm cmi_mc_register(cmi_hdl_t hdl, const cmi_mc_ops_t *mcops, void *mcdata) 7597aec1d6eScindi { 760*20c794b3Sgavinm if (!cmi_no_mca_init) 761*20c794b3Sgavinm cmi_hdl_setmc(hdl, mcops, mcdata); 7627aec1d6eScindi } 7637aec1d6eScindi 764*20c794b3Sgavinm cmi_errno_t 7654156fc34Sgavinm cmi_mc_patounum(uint64_t pa, uint8_t valid_hi, uint8_t valid_lo, uint32_t synd, 7664156fc34Sgavinm int syndtype, mc_unum_t *up) 7677aec1d6eScindi { 7687aec1d6eScindi const struct cmi_mc_ops *mcops; 769*20c794b3Sgavinm cmi_hdl_t hdl; 770*20c794b3Sgavinm cmi_errno_t rv; 7717aec1d6eScindi 772*20c794b3Sgavinm if (cmi_no_mca_init || 773*20c794b3Sgavinm (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 774*20c794b3Sgavinm return (CMIERR_MC_ABSENT); 7757aec1d6eScindi 776*20c794b3Sgavinm if ((mcops = cmi_hdl_getmcops(hdl)) == NULL || 777*20c794b3Sgavinm mcops->cmi_mc_patounum == NULL) { 778*20c794b3Sgavinm cmi_hdl_rele(hdl); 779*20c794b3Sgavinm return (CMIERR_MC_NOTSUP); 7807aec1d6eScindi } 7817aec1d6eScindi 782*20c794b3Sgavinm rv = mcops->cmi_mc_patounum(cmi_hdl_getmcdata(hdl), pa, valid_hi, 783*20c794b3Sgavinm valid_lo, synd, syndtype, up); 784*20c794b3Sgavinm 785*20c794b3Sgavinm cmi_hdl_rele(hdl); 786*20c794b3Sgavinm 787*20c794b3Sgavinm return (rv); 788*20c794b3Sgavinm } 789*20c794b3Sgavinm 790*20c794b3Sgavinm cmi_errno_t 7917aec1d6eScindi 
cmi_errno_t
cmi_mc_unumtopa(mc_unum_t *up, nvlist_t *nvl, uint64_t *pap)
{
        const struct cmi_mc_ops *mcops;
        cmi_hdl_t hdl;
        cmi_errno_t rv;

        if (up != NULL && nvl != NULL)
                return (CMIERR_API);    /* convert from just one form */

        if (cmi_no_mca_init ||
            (hdl = cmi_hdl_any()) == NULL)      /* short-term hold */
                return (CMIERR_MC_ABSENT);

        if ((mcops = cmi_hdl_getmcops(hdl)) == NULL ||
            mcops->cmi_mc_unumtopa == NULL) {
                cmi_hdl_rele(hdl);

                if (nvl != NULL && nvlist_lookup_uint64(nvl,
                    FM_FMRI_MEM_PHYSADDR, pap) == 0) {
                        return (CMIERR_MC_PARTIALUNUMTOPA);
                } else {
                        return (mcops != NULL ? CMIERR_MC_NOTSUP :
                            CMIERR_MC_ABSENT);
                }
        }

        rv = mcops->cmi_mc_unumtopa(cmi_hdl_getmcdata(hdl), up, nvl, pap);

        cmi_hdl_rele(hdl);

        return (rv);
}

void
cmi_mc_logout(cmi_hdl_t hdl, boolean_t ismc, boolean_t sync)
{
        const struct cmi_mc_ops *mcops;

        if (cmi_no_mca_init || (mcops = cmi_hdl_getmcops(hdl)) == NULL)
                return;

        if (mcops->cmi_mc_logout != NULL)
                mcops->cmi_mc_logout(hdl, ismc, sync);
}

cmi_errno_t
cmi_hdl_msrinject(cmi_hdl_t hdl, cmi_mca_regs_t *regs, uint_t nregs,
    int force)
{
        cmi_t *cmi = cmi_hdl_getcmi(hdl);

        if (!CMI_OP_PRESENT(cmi, cmi_msrinject))
                return (CMIERR_NOTSUP);

        return (CMI_OPS(cmi)->cmi_msrinject(hdl, regs, nregs, force));
}

boolean_t
cmi_panic_on_ue(void)
{
        return (cmi_panic_on_uncorrectable_error ? B_TRUE : B_FALSE);
}