17aec1d6eScindi /* 23ad553a7Sgavinm * CDDL HEADER START 33ad553a7Sgavinm * 43ad553a7Sgavinm * The contents of this file are subject to the terms of the 53ad553a7Sgavinm * Common Development and Distribution License (the "License"). 63ad553a7Sgavinm * You may not use this file except in compliance with the License. 73ad553a7Sgavinm * 83ad553a7Sgavinm * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 93ad553a7Sgavinm * or http://www.opensolaris.org/os/licensing. 103ad553a7Sgavinm * See the License for the specific language governing permissions 113ad553a7Sgavinm * and limitations under the License. 123ad553a7Sgavinm * 133ad553a7Sgavinm * When distributing Covered Code, include this CDDL HEADER in each 143ad553a7Sgavinm * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 153ad553a7Sgavinm * If applicable, add the following below this CDDL HEADER, with the 163ad553a7Sgavinm * fields enclosed by brackets "[]" replaced with your own identifying 173ad553a7Sgavinm * information: Portions Copyright [yyyy] [name of copyright owner] 183ad553a7Sgavinm * 193ad553a7Sgavinm * CDDL HEADER END 203ad553a7Sgavinm */ 213ad553a7Sgavinm 223ad553a7Sgavinm /* 23*e3d60c9bSAdrian Frost * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 247aec1d6eScindi * Use is subject to license terms. 257aec1d6eScindi */ 267aec1d6eScindi 277aec1d6eScindi /* 287aec1d6eScindi * Public interface to routines implemented by CPU modules 297aec1d6eScindi */ 307aec1d6eScindi 3120c794b3Sgavinm #include <sys/types.h> 3220c794b3Sgavinm #include <sys/atomic.h> 337aec1d6eScindi #include <sys/x86_archext.h> 347aec1d6eScindi #include <sys/cpu_module_impl.h> 3520c794b3Sgavinm #include <sys/cpu_module_ms.h> 367aec1d6eScindi #include <sys/fm/util.h> 377aec1d6eScindi #include <sys/reboot.h> 387aec1d6eScindi #include <sys/modctl.h> 397aec1d6eScindi #include <sys/param.h> 407aec1d6eScindi #include <sys/cmn_err.h> 417aec1d6eScindi #include <sys/systm.h> 4220c794b3Sgavinm #include <sys/fm/protocol.h> 4320c794b3Sgavinm #include <sys/pcb.h> 4420c794b3Sgavinm #include <sys/ontrap.h> 4520c794b3Sgavinm #include <sys/psw.h> 4620c794b3Sgavinm #include <sys/privregs.h> 477aec1d6eScindi 4820c794b3Sgavinm /* 4920c794b3Sgavinm * Set to force cmi_init to fail. 5020c794b3Sgavinm */ 5120c794b3Sgavinm int cmi_no_init = 0; 527aec1d6eScindi 5320c794b3Sgavinm /* 5420c794b3Sgavinm * Set to avoid MCA initialization. 5520c794b3Sgavinm */ 5620c794b3Sgavinm int cmi_no_mca_init = 0; 577aec1d6eScindi 587aec1d6eScindi /* 598a40a695Sgavinm * If cleared for debugging we will not attempt to load a model-specific 608a40a695Sgavinm * cpu module but will load the generic cpu module instead. 618a40a695Sgavinm */ 628a40a695Sgavinm int cmi_force_generic = 0; 638a40a695Sgavinm 648a40a695Sgavinm /* 657aec1d6eScindi * If cleared for debugging, we will suppress panicking on fatal hardware 667aec1d6eScindi * errors. This should *only* be used for debugging; it use can and will 677aec1d6eScindi * cause data corruption if actual hardware errors are detected by the system. 687aec1d6eScindi */ 697aec1d6eScindi int cmi_panic_on_uncorrectable_error = 1; 707aec1d6eScindi 7120c794b3Sgavinm /* 72*e3d60c9bSAdrian Frost * Set to indicate whether we are able to enable cmci interrupt. 73*e3d60c9bSAdrian Frost */ 74*e3d60c9bSAdrian Frost int cmi_enable_cmci = 0; 75*e3d60c9bSAdrian Frost 76*e3d60c9bSAdrian Frost /* 7720c794b3Sgavinm * Subdirectory (relative to the module search path) in which we will 7820c794b3Sgavinm * look for cpu modules. 7920c794b3Sgavinm */ 8020c794b3Sgavinm #define CPUMOD_SUBDIR "cpu" 8120c794b3Sgavinm 8220c794b3Sgavinm /* 8320c794b3Sgavinm * CPU modules have a filenames such as "cpu.AuthenticAMD.15" and 8420c794b3Sgavinm * "cpu.generic" - the "cpu" prefix is specified by the following. 8520c794b3Sgavinm */ 8620c794b3Sgavinm #define CPUMOD_PREFIX "cpu" 8720c794b3Sgavinm 8820c794b3Sgavinm /* 8920c794b3Sgavinm * Structure used to keep track of cpu modules we have loaded and their ops 9020c794b3Sgavinm */ 9120c794b3Sgavinm typedef struct cmi { 9220c794b3Sgavinm struct cmi *cmi_next; 9320c794b3Sgavinm struct cmi *cmi_prev; 9420c794b3Sgavinm const cmi_ops_t *cmi_ops; 9520c794b3Sgavinm struct modctl *cmi_modp; 9620c794b3Sgavinm uint_t cmi_refcnt; 9720c794b3Sgavinm } cmi_t; 9820c794b3Sgavinm 997aec1d6eScindi static cmi_t *cmi_list; 1007aec1d6eScindi static kmutex_t cmi_load_lock; 1017aec1d6eScindi 10220c794b3Sgavinm /* 10320c794b3Sgavinm * Functions we need from cmi_hw.c that are not part of the cpu_module.h 10420c794b3Sgavinm * interface. 10520c794b3Sgavinm */ 106*e3d60c9bSAdrian Frost extern cmi_hdl_t cmi_hdl_create(enum cmi_hdl_class, uint_t, uint_t, uint_t, 107*e3d60c9bSAdrian Frost boolean_t); 10820c794b3Sgavinm extern void cmi_hdl_setcmi(cmi_hdl_t, void *, void *); 10920c794b3Sgavinm extern void *cmi_hdl_getcmi(cmi_hdl_t); 11020c794b3Sgavinm extern void cmi_hdl_setmc(cmi_hdl_t, const struct cmi_mc_ops *, void *); 11120c794b3Sgavinm 11220c794b3Sgavinm #define HDL2CMI(hdl) cmi_hdl_getcmi(hdl) 11320c794b3Sgavinm 11420c794b3Sgavinm #define CMI_OPS(cmi) (cmi)->cmi_ops 11520c794b3Sgavinm #define CMI_OP_PRESENT(cmi, op) ((cmi) && CMI_OPS(cmi)->op != NULL) 11620c794b3Sgavinm 11720c794b3Sgavinm #define CMI_MATCH_VENDOR 0 /* Just match on vendor */ 11820c794b3Sgavinm #define CMI_MATCH_FAMILY 1 /* Match down to family */ 11920c794b3Sgavinm #define CMI_MATCH_MODEL 2 /* Match down to model */ 12020c794b3Sgavinm #define CMI_MATCH_STEPPING 3 /* Match down to stepping */ 12120c794b3Sgavinm 12220c794b3Sgavinm static void 12320c794b3Sgavinm cmi_link(cmi_t *cmi) 1247aec1d6eScindi { 12520c794b3Sgavinm ASSERT(MUTEX_HELD(&cmi_load_lock)); 12620c794b3Sgavinm 12720c794b3Sgavinm cmi->cmi_prev = NULL; 12820c794b3Sgavinm cmi->cmi_next = cmi_list; 12920c794b3Sgavinm if (cmi_list != NULL) 13020c794b3Sgavinm cmi_list->cmi_prev = cmi; 13120c794b3Sgavinm cmi_list = cmi; 13220c794b3Sgavinm } 13320c794b3Sgavinm 13420c794b3Sgavinm static void 13520c794b3Sgavinm cmi_unlink(cmi_t *cmi) 13620c794b3Sgavinm { 13720c794b3Sgavinm ASSERT(MUTEX_HELD(&cmi_load_lock)); 13820c794b3Sgavinm ASSERT(cmi->cmi_refcnt == 0); 13920c794b3Sgavinm 14020c794b3Sgavinm if (cmi->cmi_prev != NULL) 14120c794b3Sgavinm cmi->cmi_prev = cmi->cmi_next; 14220c794b3Sgavinm 14320c794b3Sgavinm if (cmi->cmi_next != NULL) 14420c794b3Sgavinm cmi->cmi_next->cmi_prev = cmi->cmi_prev; 14520c794b3Sgavinm 14620c794b3Sgavinm if (cmi_list == cmi) 14720c794b3Sgavinm cmi_list = cmi->cmi_next; 14820c794b3Sgavinm } 14920c794b3Sgavinm 15020c794b3Sgavinm /* 15120c794b3Sgavinm * Hold the module in memory. We call to CPU modules without using the 15220c794b3Sgavinm * stubs mechanism, so these modules must be manually held in memory. 15320c794b3Sgavinm * The mod_ref acts as if another loaded module has a dependency on us. 15420c794b3Sgavinm */ 15520c794b3Sgavinm static void 15620c794b3Sgavinm cmi_hold(cmi_t *cmi) 15720c794b3Sgavinm { 15820c794b3Sgavinm ASSERT(MUTEX_HELD(&cmi_load_lock)); 15920c794b3Sgavinm 16020c794b3Sgavinm mutex_enter(&mod_lock); 16120c794b3Sgavinm cmi->cmi_modp->mod_ref++; 16220c794b3Sgavinm mutex_exit(&mod_lock); 16320c794b3Sgavinm cmi->cmi_refcnt++; 16420c794b3Sgavinm } 16520c794b3Sgavinm 16620c794b3Sgavinm static void 16720c794b3Sgavinm cmi_rele(cmi_t *cmi) 16820c794b3Sgavinm { 16920c794b3Sgavinm ASSERT(MUTEX_HELD(&cmi_load_lock)); 17020c794b3Sgavinm 17120c794b3Sgavinm mutex_enter(&mod_lock); 17220c794b3Sgavinm cmi->cmi_modp->mod_ref--; 17320c794b3Sgavinm mutex_exit(&mod_lock); 17420c794b3Sgavinm 17520c794b3Sgavinm if (--cmi->cmi_refcnt == 0) { 17620c794b3Sgavinm cmi_unlink(cmi); 17720c794b3Sgavinm kmem_free(cmi, sizeof (cmi_t)); 17820c794b3Sgavinm } 17920c794b3Sgavinm } 18020c794b3Sgavinm 18120c794b3Sgavinm static cmi_ops_t * 18220c794b3Sgavinm cmi_getops(modctl_t *modp) 18320c794b3Sgavinm { 18420c794b3Sgavinm cmi_ops_t *ops; 18520c794b3Sgavinm 18620c794b3Sgavinm if ((ops = (cmi_ops_t *)modlookup_by_modctl(modp, "_cmi_ops")) == 18720c794b3Sgavinm NULL) { 18820c794b3Sgavinm cmn_err(CE_WARN, "cpu module '%s' is invalid: no _cmi_ops " 18920c794b3Sgavinm "found", modp->mod_modname); 19020c794b3Sgavinm return (NULL); 19120c794b3Sgavinm } 19220c794b3Sgavinm 19320c794b3Sgavinm if (ops->cmi_init == NULL) { 19420c794b3Sgavinm cmn_err(CE_WARN, "cpu module '%s' is invalid: no cmi_init " 19520c794b3Sgavinm "entry point", modp->mod_modname); 19620c794b3Sgavinm return (NULL); 19720c794b3Sgavinm } 19820c794b3Sgavinm 19920c794b3Sgavinm return (ops); 2007aec1d6eScindi } 2017aec1d6eScindi 2027aec1d6eScindi static cmi_t * 2037aec1d6eScindi cmi_load_modctl(modctl_t *modp) 2047aec1d6eScindi { 20520c794b3Sgavinm cmi_ops_t *ops; 20620c794b3Sgavinm uintptr_t ver; 2077aec1d6eScindi cmi_t *cmi; 20820c794b3Sgavinm cmi_api_ver_t apiver; 2097aec1d6eScindi 2107aec1d6eScindi ASSERT(MUTEX_HELD(&cmi_load_lock)); 2117aec1d6eScindi 2127aec1d6eScindi for (cmi = cmi_list; cmi != NULL; cmi = cmi->cmi_next) { 2137aec1d6eScindi if (cmi->cmi_modp == modp) 2147aec1d6eScindi return (cmi); 2157aec1d6eScindi } 2167aec1d6eScindi 21720c794b3Sgavinm if ((ver = modlookup_by_modctl(modp, "_cmi_api_version")) == NULL) { 21820c794b3Sgavinm /* 21920c794b3Sgavinm * Apparently a cpu module before versioning was introduced - 22020c794b3Sgavinm * we call this version 0. 22120c794b3Sgavinm */ 22220c794b3Sgavinm apiver = CMI_API_VERSION_0; 22320c794b3Sgavinm } else { 22420c794b3Sgavinm apiver = *((cmi_api_ver_t *)ver); 22520c794b3Sgavinm if (!CMI_API_VERSION_CHKMAGIC(apiver)) { 22620c794b3Sgavinm cmn_err(CE_WARN, "cpu module '%s' is invalid: " 22720c794b3Sgavinm "_cmi_api_version 0x%x has bad magic", 22820c794b3Sgavinm modp->mod_modname, apiver); 22920c794b3Sgavinm return (NULL); 23020c794b3Sgavinm } 23120c794b3Sgavinm } 23220c794b3Sgavinm 23320c794b3Sgavinm if (apiver != CMI_API_VERSION) { 23420c794b3Sgavinm cmn_err(CE_WARN, "cpu module '%s' has API version %d, " 23520c794b3Sgavinm "kernel requires API version %d", modp->mod_modname, 23620c794b3Sgavinm CMI_API_VERSION_TOPRINT(apiver), 23720c794b3Sgavinm CMI_API_VERSION_TOPRINT(CMI_API_VERSION)); 2387aec1d6eScindi return (NULL); 2397aec1d6eScindi } 2407aec1d6eScindi 24120c794b3Sgavinm if ((ops = cmi_getops(modp)) == NULL) 24220c794b3Sgavinm return (NULL); 2437aec1d6eScindi 24420c794b3Sgavinm cmi = kmem_zalloc(sizeof (*cmi), KM_SLEEP); 24520c794b3Sgavinm cmi->cmi_ops = ops; 2467aec1d6eScindi cmi->cmi_modp = modp; 2477aec1d6eScindi 24820c794b3Sgavinm cmi_link(cmi); 24920c794b3Sgavinm 25020c794b3Sgavinm return (cmi); 25120c794b3Sgavinm } 25220c794b3Sgavinm 25320c794b3Sgavinm static int 25420c794b3Sgavinm cmi_cpu_match(cmi_hdl_t hdl1, cmi_hdl_t hdl2, int match) 25520c794b3Sgavinm { 25620c794b3Sgavinm if (match >= CMI_MATCH_VENDOR && 25720c794b3Sgavinm cmi_hdl_vendor(hdl1) != cmi_hdl_vendor(hdl2)) 25820c794b3Sgavinm return (0); 25920c794b3Sgavinm 26020c794b3Sgavinm if (match >= CMI_MATCH_FAMILY && 26120c794b3Sgavinm cmi_hdl_family(hdl1) != cmi_hdl_family(hdl2)) 26220c794b3Sgavinm return (0); 26320c794b3Sgavinm 26420c794b3Sgavinm if (match >= CMI_MATCH_MODEL && 26520c794b3Sgavinm cmi_hdl_model(hdl1) != cmi_hdl_model(hdl2)) 26620c794b3Sgavinm return (0); 26720c794b3Sgavinm 26820c794b3Sgavinm if (match >= CMI_MATCH_STEPPING && 26920c794b3Sgavinm cmi_hdl_stepping(hdl1) != cmi_hdl_stepping(hdl2)) 27020c794b3Sgavinm return (0); 27120c794b3Sgavinm 27220c794b3Sgavinm return (1); 27320c794b3Sgavinm } 27420c794b3Sgavinm 27520c794b3Sgavinm static int 27620c794b3Sgavinm cmi_search_list_cb(cmi_hdl_t whdl, void *arg1, void *arg2, void *arg3) 27720c794b3Sgavinm { 27820c794b3Sgavinm cmi_hdl_t thdl = (cmi_hdl_t)arg1; 27920c794b3Sgavinm int match = *((int *)arg2); 28020c794b3Sgavinm cmi_hdl_t *rsltp = (cmi_hdl_t *)arg3; 28120c794b3Sgavinm 28220c794b3Sgavinm if (cmi_cpu_match(thdl, whdl, match)) { 28320c794b3Sgavinm cmi_hdl_hold(whdl); /* short-term hold */ 28420c794b3Sgavinm *rsltp = whdl; 28520c794b3Sgavinm return (CMI_HDL_WALK_DONE); 28620c794b3Sgavinm } else { 28720c794b3Sgavinm return (CMI_HDL_WALK_NEXT); 28820c794b3Sgavinm } 28920c794b3Sgavinm } 29020c794b3Sgavinm 29120c794b3Sgavinm static cmi_t * 29220c794b3Sgavinm cmi_search_list(cmi_hdl_t hdl, int match) 29320c794b3Sgavinm { 29420c794b3Sgavinm cmi_hdl_t dhdl = NULL; 29520c794b3Sgavinm cmi_t *cmi = NULL; 29620c794b3Sgavinm 29720c794b3Sgavinm ASSERT(MUTEX_HELD(&cmi_load_lock)); 29820c794b3Sgavinm 29920c794b3Sgavinm cmi_hdl_walk(cmi_search_list_cb, (void *)hdl, (void *)&match, &dhdl); 30020c794b3Sgavinm if (dhdl) { 30120c794b3Sgavinm cmi = HDL2CMI(dhdl); 30220c794b3Sgavinm cmi_hdl_rele(dhdl); /* held in cmi_search_list_cb */ 30320c794b3Sgavinm } 3047aec1d6eScindi 3057aec1d6eScindi return (cmi); 3067aec1d6eScindi } 3077aec1d6eScindi 3087aec1d6eScindi static cmi_t * 30920c794b3Sgavinm cmi_load_module(cmi_hdl_t hdl, int match, int *chosenp) 3107aec1d6eScindi { 3117aec1d6eScindi modctl_t *modp; 3127aec1d6eScindi cmi_t *cmi; 31320c794b3Sgavinm int modid; 3147aec1d6eScindi uint_t s[3]; 3157aec1d6eScindi 3167aec1d6eScindi ASSERT(MUTEX_HELD(&cmi_load_lock)); 31720c794b3Sgavinm ASSERT(match == CMI_MATCH_STEPPING || match == CMI_MATCH_MODEL || 31820c794b3Sgavinm match == CMI_MATCH_FAMILY || match == CMI_MATCH_VENDOR); 3197aec1d6eScindi 3207aec1d6eScindi /* 32120c794b3Sgavinm * Have we already loaded a module for a cpu with the same 32220c794b3Sgavinm * vendor/family/model/stepping? 3237aec1d6eScindi */ 32420c794b3Sgavinm if ((cmi = cmi_search_list(hdl, match)) != NULL) { 32520c794b3Sgavinm cmi_hold(cmi); 32620c794b3Sgavinm return (cmi); 32720c794b3Sgavinm } 3287aec1d6eScindi 32920c794b3Sgavinm s[0] = cmi_hdl_family(hdl); 33020c794b3Sgavinm s[1] = cmi_hdl_model(hdl); 33120c794b3Sgavinm s[2] = cmi_hdl_stepping(hdl); 3327aec1d6eScindi modid = modload_qualified(CPUMOD_SUBDIR, CPUMOD_PREFIX, 33320c794b3Sgavinm cmi_hdl_vendorstr(hdl), ".", s, match, chosenp); 3347aec1d6eScindi 3357aec1d6eScindi if (modid == -1) 3367aec1d6eScindi return (NULL); 3377aec1d6eScindi 3387aec1d6eScindi modp = mod_hold_by_id(modid); 3397aec1d6eScindi cmi = cmi_load_modctl(modp); 34020c794b3Sgavinm if (cmi) 34120c794b3Sgavinm cmi_hold(cmi); 3427aec1d6eScindi mod_release_mod(modp); 3437aec1d6eScindi 3447aec1d6eScindi return (cmi); 3457aec1d6eScindi } 3467aec1d6eScindi 34720c794b3Sgavinm /* 34820c794b3Sgavinm * Try to load a cpu module with specific support for this chip type. 34920c794b3Sgavinm */ 3507aec1d6eScindi static cmi_t * 35120c794b3Sgavinm cmi_load_specific(cmi_hdl_t hdl, void **datap) 35220c794b3Sgavinm { 35320c794b3Sgavinm cmi_t *cmi; 35420c794b3Sgavinm int err; 35520c794b3Sgavinm int i; 35620c794b3Sgavinm 35720c794b3Sgavinm ASSERT(MUTEX_HELD(&cmi_load_lock)); 35820c794b3Sgavinm 35920c794b3Sgavinm for (i = CMI_MATCH_STEPPING; i >= CMI_MATCH_VENDOR; i--) { 36020c794b3Sgavinm int suffixlevel; 36120c794b3Sgavinm 36220c794b3Sgavinm if ((cmi = cmi_load_module(hdl, i, &suffixlevel)) == NULL) 36320c794b3Sgavinm return (NULL); 36420c794b3Sgavinm 36520c794b3Sgavinm /* 36620c794b3Sgavinm * A module has loaded and has a _cmi_ops structure, and the 36720c794b3Sgavinm * module has been held for this instance. Call its cmi_init 36820c794b3Sgavinm * entry point - we expect success (0) or ENOTSUP. 36920c794b3Sgavinm */ 37020c794b3Sgavinm if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) == 0) { 37120c794b3Sgavinm if (boothowto & RB_VERBOSE) { 37220c794b3Sgavinm printf("initialized cpu module '%s' on " 37320c794b3Sgavinm "chip %d core %d strand %d\n", 37420c794b3Sgavinm cmi->cmi_modp->mod_modname, 37520c794b3Sgavinm cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl), 37620c794b3Sgavinm cmi_hdl_strandid(hdl)); 37720c794b3Sgavinm } 37820c794b3Sgavinm return (cmi); 37920c794b3Sgavinm } else if (err != ENOTSUP) { 38020c794b3Sgavinm cmn_err(CE_WARN, "failed to init cpu module '%s' on " 38120c794b3Sgavinm "chip %d core %d strand %d: err=%d\n", 38220c794b3Sgavinm cmi->cmi_modp->mod_modname, 38320c794b3Sgavinm cmi_hdl_chipid(hdl), cmi_hdl_coreid(hdl), 38420c794b3Sgavinm cmi_hdl_strandid(hdl), err); 38520c794b3Sgavinm } 38620c794b3Sgavinm 38720c794b3Sgavinm /* 38820c794b3Sgavinm * The module failed or declined to init, so release 38920c794b3Sgavinm * it and update i to be equal to the number 39020c794b3Sgavinm * of suffices actually used in the last module path. 39120c794b3Sgavinm */ 39220c794b3Sgavinm cmi_rele(cmi); 39320c794b3Sgavinm i = suffixlevel; 39420c794b3Sgavinm } 39520c794b3Sgavinm 39620c794b3Sgavinm return (NULL); 39720c794b3Sgavinm } 39820c794b3Sgavinm 39920c794b3Sgavinm /* 40020c794b3Sgavinm * Load the generic IA32 MCA cpu module, which may still supplement 40120c794b3Sgavinm * itself with model-specific support through cpu model-specific modules. 40220c794b3Sgavinm */ 40320c794b3Sgavinm static cmi_t * 40420c794b3Sgavinm cmi_load_generic(cmi_hdl_t hdl, void **datap) 4057aec1d6eScindi { 4067aec1d6eScindi modctl_t *modp; 4077aec1d6eScindi cmi_t *cmi; 4087aec1d6eScindi int modid; 40920c794b3Sgavinm int err; 41020c794b3Sgavinm 41120c794b3Sgavinm ASSERT(MUTEX_HELD(&cmi_load_lock)); 4127aec1d6eScindi 4137aec1d6eScindi if ((modid = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic")) == -1) 4147aec1d6eScindi return (NULL); 4157aec1d6eScindi 4167aec1d6eScindi modp = mod_hold_by_id(modid); 4177aec1d6eScindi cmi = cmi_load_modctl(modp); 41820c794b3Sgavinm if (cmi) 41920c794b3Sgavinm cmi_hold(cmi); 4207aec1d6eScindi mod_release_mod(modp); 4217aec1d6eScindi 42220c794b3Sgavinm if (cmi == NULL) 42320c794b3Sgavinm return (NULL); 42420c794b3Sgavinm 42520c794b3Sgavinm if ((err = cmi->cmi_ops->cmi_init(hdl, datap)) != 0) { 42620c794b3Sgavinm if (err != ENOTSUP) 42720c794b3Sgavinm cmn_err(CE_WARN, CPUMOD_PREFIX ".generic failed to " 42820c794b3Sgavinm "init: err=%d", err); 42920c794b3Sgavinm cmi_rele(cmi); 43020c794b3Sgavinm return (NULL); 43120c794b3Sgavinm } 43220c794b3Sgavinm 4337aec1d6eScindi return (cmi); 4347aec1d6eScindi } 4357aec1d6eScindi 43620c794b3Sgavinm cmi_hdl_t 43720c794b3Sgavinm cmi_init(enum cmi_hdl_class class, uint_t chipid, uint_t coreid, 438*e3d60c9bSAdrian Frost uint_t strandid, boolean_t mstrand) 4397aec1d6eScindi { 44020c794b3Sgavinm cmi_t *cmi = NULL; 44120c794b3Sgavinm cmi_hdl_t hdl; 4427aec1d6eScindi void *data; 4437aec1d6eScindi 44420c794b3Sgavinm if (cmi_no_init) { 44520c794b3Sgavinm cmi_no_mca_init = 1; 44620c794b3Sgavinm return (NULL); 44720c794b3Sgavinm } 44820c794b3Sgavinm 4497aec1d6eScindi mutex_enter(&cmi_load_lock); 4507aec1d6eScindi 451*e3d60c9bSAdrian Frost if ((hdl = cmi_hdl_create(class, chipid, coreid, strandid, 452*e3d60c9bSAdrian Frost mstrand)) == NULL) { 4537aec1d6eScindi mutex_exit(&cmi_load_lock); 45420c794b3Sgavinm cmn_err(CE_WARN, "There will be no MCA support on chip %d " 45520c794b3Sgavinm "core %d strand %d (cmi_hdl_create returned NULL)\n", 45620c794b3Sgavinm chipid, coreid, strandid); 45720c794b3Sgavinm return (NULL); 4587aec1d6eScindi } 4597aec1d6eScindi 46020c794b3Sgavinm if (!cmi_force_generic) 46120c794b3Sgavinm cmi = cmi_load_specific(hdl, &data); 46220c794b3Sgavinm 46320c794b3Sgavinm if (cmi == NULL && (cmi = cmi_load_generic(hdl, &data)) == NULL) { 46420c794b3Sgavinm cmn_err(CE_WARN, "There will be no MCA support on chip %d " 46520c794b3Sgavinm "core %d strand %d\n", chipid, coreid, strandid); 46620c794b3Sgavinm cmi_hdl_rele(hdl); 4677aec1d6eScindi mutex_exit(&cmi_load_lock); 46820c794b3Sgavinm return (NULL); 4697aec1d6eScindi } 4707aec1d6eScindi 47120c794b3Sgavinm cmi_hdl_setcmi(hdl, cmi, data); 4727aec1d6eScindi 47320c794b3Sgavinm cms_init(hdl); 47420c794b3Sgavinm 4757aec1d6eScindi mutex_exit(&cmi_load_lock); 4767aec1d6eScindi 47720c794b3Sgavinm return (hdl); 4787aec1d6eScindi } 4797aec1d6eScindi 48020c794b3Sgavinm /* 48120c794b3Sgavinm * cmi_fini is not called at the moment. It is intended to be called 48220c794b3Sgavinm * on DR deconfigure of a cpu resource. It should not be called at 48320c794b3Sgavinm * simple offline of a cpu. 48420c794b3Sgavinm */ 4857aec1d6eScindi void 48620c794b3Sgavinm cmi_fini(cmi_hdl_t hdl) 4877aec1d6eScindi { 48820c794b3Sgavinm cmi_t *cmi = HDL2CMI(hdl); 48920c794b3Sgavinm 49020c794b3Sgavinm if (cms_present(hdl)) 49120c794b3Sgavinm cms_fini(hdl); 49220c794b3Sgavinm 49320c794b3Sgavinm if (CMI_OP_PRESENT(cmi, cmi_fini)) 49420c794b3Sgavinm CMI_OPS(cmi)->cmi_fini(hdl); 49520c794b3Sgavinm 49620c794b3Sgavinm cmi_hdl_rele(hdl); /* release hold obtained in cmi_hdl_create */ 4977aec1d6eScindi } 4987aec1d6eScindi 49920c794b3Sgavinm /* 50020c794b3Sgavinm * cmi_post_startup is called from post_startup for the boot cpu only. 50120c794b3Sgavinm */ 5027aec1d6eScindi void 50320c794b3Sgavinm cmi_post_startup(void) 5047aec1d6eScindi { 50520c794b3Sgavinm cmi_hdl_t hdl; 50620c794b3Sgavinm cmi_t *cmi; 50720c794b3Sgavinm 50820c794b3Sgavinm if (cmi_no_mca_init != 0 || 50920c794b3Sgavinm (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 51020c794b3Sgavinm return; 51120c794b3Sgavinm 51220c794b3Sgavinm cmi = HDL2CMI(hdl); 51320c794b3Sgavinm 51420c794b3Sgavinm if (CMI_OP_PRESENT(cmi, cmi_post_startup)) 51520c794b3Sgavinm CMI_OPS(cmi)->cmi_post_startup(hdl); 51620c794b3Sgavinm 51720c794b3Sgavinm cmi_hdl_rele(hdl); 5187aec1d6eScindi } 5197aec1d6eScindi 5208a40a695Sgavinm /* 5218a40a695Sgavinm * Called just once from start_other_cpus when all processors are started. 5228a40a695Sgavinm * This will not be called for each cpu, so the registered op must not 5238a40a695Sgavinm * assume it is called as such. 5248a40a695Sgavinm */ 5257aec1d6eScindi void 5263ad553a7Sgavinm cmi_post_mpstartup(void) 5273ad553a7Sgavinm { 52820c794b3Sgavinm cmi_hdl_t hdl; 52920c794b3Sgavinm cmi_t *cmi; 53020c794b3Sgavinm 53120c794b3Sgavinm if (cmi_no_mca_init != 0 || 53220c794b3Sgavinm (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 53320c794b3Sgavinm return; 53420c794b3Sgavinm 53520c794b3Sgavinm cmi = HDL2CMI(hdl); 53620c794b3Sgavinm 53720c794b3Sgavinm if (CMI_OP_PRESENT(cmi, cmi_post_mpstartup)) 53820c794b3Sgavinm CMI_OPS(cmi)->cmi_post_mpstartup(hdl); 53920c794b3Sgavinm 54020c794b3Sgavinm cmi_hdl_rele(hdl); 5413ad553a7Sgavinm } 5423ad553a7Sgavinm 5433ad553a7Sgavinm void 54420c794b3Sgavinm cmi_faulted_enter(cmi_hdl_t hdl) 5457aec1d6eScindi { 54620c794b3Sgavinm cmi_t *cmi = HDL2CMI(hdl); 54720c794b3Sgavinm 54820c794b3Sgavinm if (cmi_no_mca_init != 0) 54920c794b3Sgavinm return; 55020c794b3Sgavinm 55120c794b3Sgavinm if (CMI_OP_PRESENT(cmi, cmi_faulted_enter)) 55220c794b3Sgavinm CMI_OPS(cmi)->cmi_faulted_enter(hdl); 5537aec1d6eScindi } 5547aec1d6eScindi 5557aec1d6eScindi void 55620c794b3Sgavinm cmi_faulted_exit(cmi_hdl_t hdl) 5577aec1d6eScindi { 55820c794b3Sgavinm cmi_t *cmi = HDL2CMI(hdl); 55920c794b3Sgavinm 56020c794b3Sgavinm if (cmi_no_mca_init != 0) 56120c794b3Sgavinm return; 56220c794b3Sgavinm 56320c794b3Sgavinm if (CMI_OP_PRESENT(cmi, cmi_faulted_exit)) 56420c794b3Sgavinm CMI_OPS(cmi)->cmi_faulted_exit(hdl); 5657aec1d6eScindi } 5667aec1d6eScindi 56720c794b3Sgavinm void 56820c794b3Sgavinm cmi_mca_init(cmi_hdl_t hdl) 56920c794b3Sgavinm { 57020c794b3Sgavinm cmi_t *cmi; 57120c794b3Sgavinm 57220c794b3Sgavinm if (cmi_no_mca_init != 0) 57320c794b3Sgavinm return; 57420c794b3Sgavinm 57520c794b3Sgavinm cmi = HDL2CMI(hdl); 57620c794b3Sgavinm 57720c794b3Sgavinm if (CMI_OP_PRESENT(cmi, cmi_mca_init)) 57820c794b3Sgavinm CMI_OPS(cmi)->cmi_mca_init(hdl); 57920c794b3Sgavinm } 58020c794b3Sgavinm 58120c794b3Sgavinm #define CMI_RESPONSE_PANIC 0x0 /* panic must have value 0 */ 58220c794b3Sgavinm #define CMI_RESPONSE_NONE 0x1 58320c794b3Sgavinm #define CMI_RESPONSE_CKILL 0x2 58420c794b3Sgavinm #define CMI_RESPONSE_REBOOT 0x3 /* not implemented */ 58520c794b3Sgavinm #define CMI_RESPONSE_ONTRAP_PROT 0x4 58620c794b3Sgavinm #define CMI_RESPONSE_LOFAULT_PROT 0x5 58720c794b3Sgavinm 58820c794b3Sgavinm /* 58920c794b3Sgavinm * Return 0 if we will panic in response to this machine check, otherwise 59020c794b3Sgavinm * non-zero. If the caller is cmi_mca_trap in this file then the nonzero 59120c794b3Sgavinm * return values are to be interpreted from CMI_RESPONSE_* above. 59220c794b3Sgavinm * 59320c794b3Sgavinm * This function must just return what will be done without actually 59420c794b3Sgavinm * doing anything; this includes not changing the regs. 59520c794b3Sgavinm */ 5967aec1d6eScindi int 59720c794b3Sgavinm cmi_mce_response(struct regs *rp, uint64_t disp) 5987aec1d6eScindi { 59920c794b3Sgavinm int panicrsp = cmi_panic_on_uncorrectable_error ? CMI_RESPONSE_PANIC : 60020c794b3Sgavinm CMI_RESPONSE_NONE; 60120c794b3Sgavinm on_trap_data_t *otp; 60220c794b3Sgavinm 60320c794b3Sgavinm ASSERT(rp != NULL); /* don't call for polling, only on #MC */ 60420c794b3Sgavinm 60520c794b3Sgavinm /* 60620c794b3Sgavinm * If no bits are set in the disposition then there is nothing to 60720c794b3Sgavinm * worry about and we do not need to trampoline to ontrap or 60820c794b3Sgavinm * lofault handlers. 60920c794b3Sgavinm */ 61020c794b3Sgavinm if (disp == 0) 61120c794b3Sgavinm return (CMI_RESPONSE_NONE); 61220c794b3Sgavinm 61320c794b3Sgavinm /* 61420c794b3Sgavinm * Unconstrained errors cannot be forgiven, even by ontrap or 61520c794b3Sgavinm * lofault protection. The data is not poisoned and may not 61620c794b3Sgavinm * even belong to the trapped context - eg a writeback of 61720c794b3Sgavinm * data that is found to be bad. 61820c794b3Sgavinm */ 61920c794b3Sgavinm if (disp & CMI_ERRDISP_UC_UNCONSTRAINED) 62020c794b3Sgavinm return (panicrsp); 62120c794b3Sgavinm 62220c794b3Sgavinm /* 62320c794b3Sgavinm * ontrap OT_DATA_EC and lofault protection forgive any disposition 62420c794b3Sgavinm * other than unconstrained, even those normally forced fatal. 62520c794b3Sgavinm */ 62620c794b3Sgavinm if ((otp = curthread->t_ontrap) != NULL && otp->ot_prot & OT_DATA_EC) 62720c794b3Sgavinm return (CMI_RESPONSE_ONTRAP_PROT); 62820c794b3Sgavinm else if (curthread->t_lofault) 62920c794b3Sgavinm return (CMI_RESPONSE_LOFAULT_PROT); 63020c794b3Sgavinm 63120c794b3Sgavinm /* 63220c794b3Sgavinm * Forced-fatal errors are terminal even in user mode. 63320c794b3Sgavinm */ 63420c794b3Sgavinm if (disp & CMI_ERRDISP_FORCEFATAL) 63520c794b3Sgavinm return (panicrsp); 63620c794b3Sgavinm 63720c794b3Sgavinm /* 63820c794b3Sgavinm * If the trapped context is corrupt or we have no instruction pointer 63920c794b3Sgavinm * to resume at (and aren't trampolining to a fault handler) 64020c794b3Sgavinm * then in the kernel case we must panic and in usermode we 64120c794b3Sgavinm * kill the affected contract. 64220c794b3Sgavinm */ 64320c794b3Sgavinm if (disp & (CMI_ERRDISP_CURCTXBAD | CMI_ERRDISP_RIPV_INVALID)) 64420c794b3Sgavinm return (USERMODE(rp->r_cs) ? CMI_RESPONSE_CKILL : panicrsp); 64520c794b3Sgavinm 64620c794b3Sgavinm /* 64720c794b3Sgavinm * Anything else is harmless 64820c794b3Sgavinm */ 64920c794b3Sgavinm return (CMI_RESPONSE_NONE); 6507aec1d6eScindi } 6517aec1d6eScindi 65220c794b3Sgavinm int cma_mca_trap_panic_suppressed = 0; 65320c794b3Sgavinm 65420c794b3Sgavinm static void 65520c794b3Sgavinm cmi_mca_panic(void) 6567aec1d6eScindi { 65720c794b3Sgavinm if (cmi_panic_on_uncorrectable_error) { 65820c794b3Sgavinm fm_panic("Unrecoverable Machine-Check Exception"); 65920c794b3Sgavinm } else { 66020c794b3Sgavinm cmn_err(CE_WARN, "suppressing panic from fatal #mc"); 66120c794b3Sgavinm cma_mca_trap_panic_suppressed++; 66220c794b3Sgavinm } 6637aec1d6eScindi } 6647aec1d6eScindi 66520c794b3Sgavinm 66620c794b3Sgavinm int cma_mca_trap_contract_kills = 0; 66720c794b3Sgavinm int cma_mca_trap_ontrap_forgiven = 0; 66820c794b3Sgavinm int cma_mca_trap_lofault_forgiven = 0; 66920c794b3Sgavinm 67020c794b3Sgavinm /* 67120c794b3Sgavinm * Native #MC handler - we branch to here from mcetrap 67220c794b3Sgavinm */ 67320c794b3Sgavinm /*ARGSUSED*/ 6747aec1d6eScindi void 6757aec1d6eScindi cmi_mca_trap(struct regs *rp) 6767aec1d6eScindi { 67720c794b3Sgavinm #ifndef __xpv 67820c794b3Sgavinm cmi_hdl_t hdl = NULL; 67920c794b3Sgavinm uint64_t disp; 68020c794b3Sgavinm cmi_t *cmi; 68120c794b3Sgavinm int s; 68220c794b3Sgavinm 68320c794b3Sgavinm if (cmi_no_mca_init != 0) 68420c794b3Sgavinm return; 68520c794b3Sgavinm 68620c794b3Sgavinm /* 68720c794b3Sgavinm * This function can call cmn_err, and the cpu module cmi_mca_trap 68820c794b3Sgavinm * entry point may also elect to call cmn_err (e.g., if it can't 68920c794b3Sgavinm * log the error onto an errorq, say very early in boot). 69020c794b3Sgavinm * We need to let cprintf know that we must not block. 69120c794b3Sgavinm */ 69220c794b3Sgavinm s = spl8(); 69320c794b3Sgavinm 69420c794b3Sgavinm if ((hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU), 69520c794b3Sgavinm cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) == NULL || 69620c794b3Sgavinm (cmi = HDL2CMI(hdl)) == NULL || 69720c794b3Sgavinm !CMI_OP_PRESENT(cmi, cmi_mca_trap)) { 69820c794b3Sgavinm 69920c794b3Sgavinm cmn_err(CE_WARN, "#MC exception on cpuid %d: %s", 70020c794b3Sgavinm CPU->cpu_id, 70120c794b3Sgavinm hdl ? "handle lookup ok but no #MC handler found" : 70220c794b3Sgavinm "handle lookup failed"); 70320c794b3Sgavinm 70420c794b3Sgavinm if (hdl != NULL) 70520c794b3Sgavinm cmi_hdl_rele(hdl); 70620c794b3Sgavinm 70720c794b3Sgavinm splx(s); 70820c794b3Sgavinm return; 7097aec1d6eScindi } 7107aec1d6eScindi 71120c794b3Sgavinm disp = CMI_OPS(cmi)->cmi_mca_trap(hdl, rp); 7127aec1d6eScindi 71320c794b3Sgavinm switch (cmi_mce_response(rp, disp)) { 71420c794b3Sgavinm default: 71520c794b3Sgavinm cmn_err(CE_WARN, "Invalid response from cmi_mce_response"); 71620c794b3Sgavinm /*FALLTHRU*/ 7177aec1d6eScindi 71820c794b3Sgavinm case CMI_RESPONSE_PANIC: 71920c794b3Sgavinm cmi_mca_panic(); 72020c794b3Sgavinm break; 72120c794b3Sgavinm 72220c794b3Sgavinm case CMI_RESPONSE_NONE: 72320c794b3Sgavinm break; 72420c794b3Sgavinm 72520c794b3Sgavinm case CMI_RESPONSE_CKILL: 72620c794b3Sgavinm ttolwp(curthread)->lwp_pcb.pcb_flags |= ASYNC_HWERR; 72720c794b3Sgavinm aston(curthread); 72820c794b3Sgavinm cma_mca_trap_contract_kills++; 72920c794b3Sgavinm break; 73020c794b3Sgavinm 73120c794b3Sgavinm case CMI_RESPONSE_ONTRAP_PROT: { 73220c794b3Sgavinm on_trap_data_t *otp = curthread->t_ontrap; 73320c794b3Sgavinm otp->ot_trap = OT_DATA_EC; 73420c794b3Sgavinm rp->r_pc = otp->ot_trampoline; 73520c794b3Sgavinm cma_mca_trap_ontrap_forgiven++; 73620c794b3Sgavinm break; 73720c794b3Sgavinm } 73820c794b3Sgavinm 73920c794b3Sgavinm case CMI_RESPONSE_LOFAULT_PROT: 74020c794b3Sgavinm rp->r_r0 = EFAULT; 74120c794b3Sgavinm rp->r_pc = curthread->t_lofault; 74220c794b3Sgavinm cma_mca_trap_lofault_forgiven++; 74320c794b3Sgavinm break; 74420c794b3Sgavinm } 74520c794b3Sgavinm 74620c794b3Sgavinm cmi_hdl_rele(hdl); 74720c794b3Sgavinm splx(s); 74820c794b3Sgavinm #endif /* __xpv */ 7497aec1d6eScindi } 7507aec1d6eScindi 7517aec1d6eScindi void 75220c794b3Sgavinm cmi_hdl_poke(cmi_hdl_t hdl) 7537aec1d6eScindi { 75420c794b3Sgavinm cmi_t *cmi = HDL2CMI(hdl); 75520c794b3Sgavinm 75620c794b3Sgavinm if (!CMI_OP_PRESENT(cmi, cmi_hdl_poke)) 75720c794b3Sgavinm return; 75820c794b3Sgavinm 75920c794b3Sgavinm CMI_OPS(cmi)->cmi_hdl_poke(hdl); 7607aec1d6eScindi } 7617aec1d6eScindi 7627aec1d6eScindi void 763*e3d60c9bSAdrian Frost cmi_cmci_trap() 764*e3d60c9bSAdrian Frost { 765*e3d60c9bSAdrian Frost #ifndef __xpv 766*e3d60c9bSAdrian Frost cmi_hdl_t hdl = NULL; 767*e3d60c9bSAdrian Frost cmi_t *cmi; 768*e3d60c9bSAdrian Frost 769*e3d60c9bSAdrian Frost if (cmi_no_mca_init != 0) 770*e3d60c9bSAdrian Frost return; 771*e3d60c9bSAdrian Frost 772*e3d60c9bSAdrian Frost if ((hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU), 773*e3d60c9bSAdrian Frost cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) == NULL || 774*e3d60c9bSAdrian Frost (cmi = HDL2CMI(hdl)) == NULL || 775*e3d60c9bSAdrian Frost !CMI_OP_PRESENT(cmi, cmi_cmci_trap)) { 776*e3d60c9bSAdrian Frost 777*e3d60c9bSAdrian Frost cmn_err(CE_WARN, "CMCI interrupt on cpuid %d: %s", 778*e3d60c9bSAdrian Frost CPU->cpu_id, 779*e3d60c9bSAdrian Frost hdl ? "handle lookup ok but no CMCI handler found" : 780*e3d60c9bSAdrian Frost "handle lookup failed"); 781*e3d60c9bSAdrian Frost 782*e3d60c9bSAdrian Frost if (hdl != NULL) 783*e3d60c9bSAdrian Frost cmi_hdl_rele(hdl); 784*e3d60c9bSAdrian Frost 785*e3d60c9bSAdrian Frost return; 786*e3d60c9bSAdrian Frost } 787*e3d60c9bSAdrian Frost 788*e3d60c9bSAdrian Frost CMI_OPS(cmi)->cmi_cmci_trap(hdl); 789*e3d60c9bSAdrian Frost 790*e3d60c9bSAdrian Frost cmi_hdl_rele(hdl); 791*e3d60c9bSAdrian Frost #endif /* __xpv */ 792*e3d60c9bSAdrian Frost } 793*e3d60c9bSAdrian Frost 794*e3d60c9bSAdrian Frost void 79520c794b3Sgavinm cmi_mc_register(cmi_hdl_t hdl, const cmi_mc_ops_t *mcops, void *mcdata) 7967aec1d6eScindi { 79720c794b3Sgavinm if (!cmi_no_mca_init) 79820c794b3Sgavinm cmi_hdl_setmc(hdl, mcops, mcdata); 7997aec1d6eScindi } 8007aec1d6eScindi 80120c794b3Sgavinm cmi_errno_t 8024156fc34Sgavinm cmi_mc_patounum(uint64_t pa, uint8_t valid_hi, uint8_t valid_lo, uint32_t synd, 8034156fc34Sgavinm int syndtype, mc_unum_t *up) 8047aec1d6eScindi { 8057aec1d6eScindi const struct cmi_mc_ops *mcops; 80620c794b3Sgavinm cmi_hdl_t hdl; 80720c794b3Sgavinm cmi_errno_t rv; 8087aec1d6eScindi 80920c794b3Sgavinm if (cmi_no_mca_init || 81020c794b3Sgavinm (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 81120c794b3Sgavinm return (CMIERR_MC_ABSENT); 8127aec1d6eScindi 81320c794b3Sgavinm if ((mcops = cmi_hdl_getmcops(hdl)) == NULL || 81420c794b3Sgavinm mcops->cmi_mc_patounum == NULL) { 81520c794b3Sgavinm cmi_hdl_rele(hdl); 81620c794b3Sgavinm return (CMIERR_MC_NOTSUP); 8177aec1d6eScindi } 8187aec1d6eScindi 81920c794b3Sgavinm rv = mcops->cmi_mc_patounum(cmi_hdl_getmcdata(hdl), pa, valid_hi, 82020c794b3Sgavinm valid_lo, synd, syndtype, up); 82120c794b3Sgavinm 82220c794b3Sgavinm cmi_hdl_rele(hdl); 82320c794b3Sgavinm 82420c794b3Sgavinm return (rv); 82520c794b3Sgavinm } 82620c794b3Sgavinm 82720c794b3Sgavinm cmi_errno_t 8287aec1d6eScindi cmi_mc_unumtopa(mc_unum_t *up, nvlist_t *nvl, uint64_t *pap) 8297aec1d6eScindi { 8307aec1d6eScindi const struct cmi_mc_ops *mcops; 83120c794b3Sgavinm cmi_hdl_t hdl; 83220c794b3Sgavinm cmi_errno_t rv; 8337aec1d6eScindi 8347aec1d6eScindi if (up != NULL && nvl != NULL) 83520c794b3Sgavinm return (CMIERR_API); /* convert from just one form */ 8367aec1d6eScindi 83720c794b3Sgavinm if (cmi_no_mca_init || 83820c794b3Sgavinm (hdl = cmi_hdl_any()) == NULL) /* short-term hold */ 83920c794b3Sgavinm return (CMIERR_MC_ABSENT); 8407aec1d6eScindi 84120c794b3Sgavinm if ((mcops = cmi_hdl_getmcops(hdl)) == NULL || 84220c794b3Sgavinm mcops->cmi_mc_unumtopa == NULL) { 84320c794b3Sgavinm cmi_hdl_rele(hdl); 84420c794b3Sgavinm 84520c794b3Sgavinm if (nvl != NULL && nvlist_lookup_uint64(nvl, 84620c794b3Sgavinm FM_FMRI_MEM_PHYSADDR, pap) == 0) { 84720c794b3Sgavinm return (CMIERR_MC_PARTIALUNUMTOPA); 84820c794b3Sgavinm } else { 84920c794b3Sgavinm return (mcops && mcops->cmi_mc_unumtopa ? 85020c794b3Sgavinm CMIERR_MC_NOTSUP : CMIERR_MC_ABSENT); 85120c794b3Sgavinm } 85220c794b3Sgavinm } 85320c794b3Sgavinm 85420c794b3Sgavinm rv = mcops->cmi_mc_unumtopa(cmi_hdl_getmcdata(hdl), up, nvl, pap); 85520c794b3Sgavinm 85620c794b3Sgavinm cmi_hdl_rele(hdl); 85720c794b3Sgavinm 85820c794b3Sgavinm return (rv); 85920c794b3Sgavinm } 86020c794b3Sgavinm 86120c794b3Sgavinm void 86220c794b3Sgavinm cmi_mc_logout(cmi_hdl_t hdl, boolean_t ismc, boolean_t sync) 86320c794b3Sgavinm { 86420c794b3Sgavinm const struct cmi_mc_ops *mcops; 86520c794b3Sgavinm 86620c794b3Sgavinm if (cmi_no_mca_init || (mcops = cmi_hdl_getmcops(hdl)) == NULL) 86720c794b3Sgavinm return; 86820c794b3Sgavinm 86920c794b3Sgavinm if (mcops->cmi_mc_logout != NULL) 87020c794b3Sgavinm mcops->cmi_mc_logout(hdl, ismc, sync); 87120c794b3Sgavinm } 87220c794b3Sgavinm 87320c794b3Sgavinm cmi_errno_t 87420c794b3Sgavinm cmi_hdl_msrinject(cmi_hdl_t hdl, cmi_mca_regs_t *regs, uint_t nregs, 87520c794b3Sgavinm int force) 87620c794b3Sgavinm { 87720c794b3Sgavinm cmi_t *cmi = cmi_hdl_getcmi(hdl); 87820c794b3Sgavinm 87920c794b3Sgavinm if (!CMI_OP_PRESENT(cmi, cmi_msrinject)) 88020c794b3Sgavinm return (CMIERR_NOTSUP); 88120c794b3Sgavinm 88220c794b3Sgavinm return (CMI_OPS(cmi)->cmi_msrinject(hdl, regs, nregs, force)); 88320c794b3Sgavinm } 88420c794b3Sgavinm 88520c794b3Sgavinm boolean_t 88620c794b3Sgavinm cmi_panic_on_ue(void) 88720c794b3Sgavinm { 88820c794b3Sgavinm return (cmi_panic_on_uncorrectable_error ? B_TRUE : B_FALSE); 8897aec1d6eScindi } 890