/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Public interface to routines implemented by CPU modules
 */

#include <sys/x86_archext.h>
#include <sys/cpu_module_impl.h>
#include <sys/fm/util.h>
#include <sys/reboot.h>
#include <sys/modctl.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/types.h>

#define	CPUMOD_SUBDIR	"cpu"
#define	CPUMOD_PREFIX	"cpu"

#define	CMI_OPS(cpu) \
	(cpu)->cpu_m.mcpu_cmi->cmi_ops
#define	CMI_DATA(cpu) \
	(cpu)->cpu_m.mcpu_cmidata

/*
 * If cleared for debugging, we will suppress panicking on fatal hardware
 * errors.  This should *only* be used for debugging; its use can and will
 * cause data corruption if actual hardware errors are detected by the system.
 */
int cmi_panic_on_uncorrectable_error = 1;

static cmi_t *cmi_list;
static kmutex_t cmi_load_lock;

/*
 * Return non-zero if the two CPUs have the same vendor, family, model and
 * stepping, in which case a single CPU module can serve both of them.
 */
static int
cmi_cpu_match(cpu_t *c1, cpu_t *c2)
{
	return (cpuid_getfamily(c1) == cpuid_getfamily(c2) &&
	    cpuid_getmodel(c1) == cpuid_getmodel(c2) &&
	    cpuid_getstep(c1) == cpuid_getstep(c2) &&
	    strcmp(cpuid_getvendorstr(c1), cpuid_getvendorstr(c2)) == 0);
}

static cmi_t *
cmi_load_modctl(modctl_t *modp)
{
	uintptr_t ops;
	cmi_t *cmi;

	ASSERT(MUTEX_HELD(&cmi_load_lock));

	for (cmi = cmi_list; cmi != NULL; cmi = cmi->cmi_next) {
		if (cmi->cmi_modp == modp)
			return (cmi);
	}

	if ((ops = modlookup_by_modctl(modp, "_cmi_ops")) == NULL) {
		cmn_err(CE_WARN, "CPU module %s is invalid: no _cmi_ops "
		    "found\n", modp->mod_modname);
		return (NULL);
	}

	/*
	 * Hold the module in memory.  We call to CPU modules without using
	 * the stubs mechanism, so these modules must be manually held in
	 * memory.  The mod_ref acts as if another loaded module has a
	 * dependency on it.
	 */
	mutex_enter(&mod_lock);
	modp->mod_ref++;
	mutex_exit(&mod_lock);

	cmi = kmem_zalloc(sizeof (cmi_t), KM_SLEEP);
	cmi->cmi_ops = (const cmi_ops_t *)ops;
	cmi->cmi_modp = modp;

	cmi->cmi_next = cmi_list;
	cmi_list = cmi;

	return (cmi);
}
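
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a model-specific CPU module exports its entry points through
 * a symbol named "_cmi_ops", which cmi_load_modctl() above resolves via
 * modlookup_by_modctl().  Assuming the member names implied by the
 * CMI_OPS() wrappers in this file (the authoritative cmi_ops_t layout
 * lives in <sys/cpu_module_impl.h>), such a module might contain:
 *
 *	static const cmi_ops_t _cmi_ops = {
 *		mymod_init,
 *		mymod_post_init,
 *		mymod_faulted_enter,
 *		mymod_faulted_exit,
 *		mymod_scrubber_enable,
 *		mymod_mca_init,
 *		mymod_mca_trap,
 *		mymod_mca_inject,
 *		mymod_mca_poke,
 *		mymod_mc_register,
 *		mymod_mc_getops
 *	};
 *
 * The mymod_* functions and the member ordering shown here are
 * hypothetical placeholders.
 */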

static cmi_t *
cmi_load_module(cpu_t *cp)
{
	modctl_t *modp;
	cmi_t *cmi;
	int i, modid;
	uint_t s[3];

	/*
	 * Look to see if we've already got a module loaded for a CPU just
	 * like this one.  If we do, then we'll re-use it.
	 */
	ASSERT(MUTEX_HELD(&cmi_load_lock));
	mutex_enter(&cpu_lock);

	for (i = 0; i < NCPU; i++) {
		cpu_t *cp2 = cpu[i];

		if (cp2 != NULL && cp2 != cp &&
		    cp2->cpu_m.mcpu_cmi != NULL && cmi_cpu_match(cp, cp2)) {
			mutex_exit(&cpu_lock);
			return (cp2->cpu_m.mcpu_cmi);
		}
	}

	mutex_exit(&cpu_lock);

	/*
	 * If we can't find a match, attempt to load the appropriate module.
	 * If that also fails, try to load the generic CPU module.
	 */
	s[0] = cpuid_getfamily(cp);
	s[1] = cpuid_getmodel(cp);
	s[2] = cpuid_getstep(cp);

	modid = modload_qualified(CPUMOD_SUBDIR, CPUMOD_PREFIX,
	    cpuid_getvendorstr(cp), ".", s, sizeof (s) / sizeof (s[0]));

	if (modid == -1)
		modid = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic");

	if (modid == -1)
		return (NULL);

	modp = mod_hold_by_id(modid);
	cmi = cmi_load_modctl(modp);
	mod_release_mod(modp);

	return (cmi);
}

/*
 * Load and hold the generic CPU module, used as a fallback when a
 * model-specific module declines to initialize a CPU.
 */
static cmi_t *
cmi_load_generic(void)
{
	modctl_t *modp;
	cmi_t *cmi;
	int modid;

	if ((modid = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic")) == -1)
		return (NULL);

	modp = mod_hold_by_id(modid);
	cmi = cmi_load_modctl(modp);
	mod_release_mod(modp);

	return (cmi);
}

/*
 * Load a CPU module for the specified CPU, and then call its cmi_init
 * routine.  If the module returns ENOTSUP, try using the generic CPU
 * module instead.  If all else fails, we return -1 and the caller will
 * panic or halt.
 */
int
cmi_load(cpu_t *cp)
{
	int err = ENOENT;
	cmi_t *cmi;
	void *data;

	mutex_enter(&cmi_load_lock);

	if ((cmi = cmi_load_module(cp)) == NULL ||
	    ((err = cmi->cmi_ops->cmi_init(cp, &data)) != 0 &&
	    err != ENOTSUP)) {
		cmn_err(CE_WARN, "CPU module %s failed to init CPU %d: err=%d",
		    cmi ? cmi->cmi_modp->mod_modname : "<>", cp->cpu_id, err);
		mutex_exit(&cmi_load_lock);
		return (-1);
	}

	if (err != 0 && ((cmi = cmi_load_generic()) == NULL ||
	    (err = cmi->cmi_ops->cmi_init(cp, &data)) != 0)) {
		cmn_err(CE_WARN, "CPU module %s failed to init CPU %d: err=%d",
		    cmi ? cmi->cmi_modp->mod_modname : "<>", cp->cpu_id, err);
		mutex_exit(&cmi_load_lock);
		return (-1);
	}

	ASSERT(cp->cpu_m.mcpu_cmi == NULL);
	cp->cpu_m.mcpu_cmi = cmi;
	cp->cpu_m.mcpu_cmidata = data;

	cmi->cmi_refcnt++;
	mutex_exit(&cmi_load_lock);

	if (boothowto & RB_VERBOSE) {
		printf("cpuid %d: initialized cpumod: %s\n",
		    cp->cpu_id, cmi->cmi_modp->mod_modname);
	}

	return (0);
}

void
cmi_init(void)
{
	if (cmi_load(CPU) < 0)
		panic("failed to load module for CPU %u", CPU->cpu_id);
}

void
cmi_post_init(void)
{
	CMI_OPS(CPU)->cmi_post_init(CMI_DATA(CPU));
}

void
cmi_faulted_enter(cpu_t *cp)
{
	CMI_OPS(cp)->cmi_faulted_enter(CMI_DATA(cp));
}

void
cmi_faulted_exit(cpu_t *cp)
{
	CMI_OPS(cp)->cmi_faulted_exit(CMI_DATA(cp));
}

int
cmi_scrubber_enable(cpu_t *cp, uint64_t base, uint64_t ilen)
{
	return (CMI_OPS(cp)->cmi_scrubber_enable(CMI_DATA(cp), base, ilen));
}

void
cmi_mca_init(void)
{
	CMI_OPS(CPU)->cmi_mca_init(CMI_DATA(CPU));
}

/*
 * Machine-check trap handler: hand the trap off to the CPU module, and
 * panic if the module reports an uncorrectable error (unless the debug
 * tunable cmi_panic_on_uncorrectable_error has been cleared).
 */
void
cmi_mca_trap(struct regs *rp)
{
	if (CMI_OPS(CPU)->cmi_mca_trap(CMI_DATA(CPU), rp)) {
		if (cmi_panic_on_uncorrectable_error)
			fm_panic("Unrecoverable Machine-Check Exception");
		else
			cmn_err(CE_WARN, "suppressing panic from fatal #mc");
	}
}
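
/*
 * Illustrative note (editorial addition, not part of the original
 * source): because cmi_panic_on_uncorrectable_error is an ordinary
 * kernel variable, the usual way to clear it for debugging would be an
 * /etc/system entry such as:
 *
 *	set cmi_panic_on_uncorrectable_error = 0
 *
 * As the comment at its definition warns, this is for debugging only;
 * suppressing the panic can cause data corruption if real hardware
 * errors occur.
 */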

int
cmi_mca_inject(cmi_mca_regs_t *regs, uint_t nregs)
{
	int err;

	kpreempt_disable();
	err = CMI_OPS(CPU)->cmi_mca_inject(CMI_DATA(CPU), regs, nregs);
	kpreempt_enable();

	return (err);
}

void
cmi_mca_poke(void)
{
	CMI_OPS(CPU)->cmi_mca_poke(CMI_DATA(CPU));
}

/*
 * Register a memory-controller driver's ops and private data with the
 * CPU module, enabling the address translation routines below.
 */
void
cmi_mc_register(cpu_t *cp, const cmi_mc_ops_t *mcops, void *mcdata)
{
	CMI_OPS(cp)->cmi_mc_register(CMI_DATA(cp), mcops, mcdata);
}

int
cmi_mc_patounum(uint64_t pa, uint32_t synd, int syndtype, mc_unum_t *up)
{
	const struct cmi_mc_ops *mcops;
	cpu_t *cp = CPU;

	if (CMI_OPS(cp) == NULL ||
	    (mcops = CMI_OPS(cp)->cmi_mc_getops(CMI_DATA(cp))) == NULL)
		return (-1);	/* not registered yet */

	return (mcops->cmi_mc_patounum(CMI_DATA(cp), pa, synd, syndtype, up));
}

int
cmi_mc_unumtopa(mc_unum_t *up, nvlist_t *nvl, uint64_t *pap)
{
	const struct cmi_mc_ops *mcops;
	cpu_t *cp = CPU;

	if (up != NULL && nvl != NULL)
		return (-1);	/* only convert from one or the other form */

	if (CMI_OPS(cp) == NULL ||
	    (mcops = CMI_OPS(cp)->cmi_mc_getops(CMI_DATA(cp))) == NULL)
		return (-1);	/* not registered yet */

	return (mcops->cmi_mc_unumtopa(CMI_DATA(cp), up, nvl, pap));
}
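
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a memory-controller driver supplies its translation callbacks
 * through a cmi_mc_ops_t and attaches them with cmi_mc_register().  The
 * mc_* names and member ordering below are hypothetical; the
 * authoritative definitions live in <sys/cpu_module_impl.h>:
 *
 *	static const cmi_mc_ops_t mc_ops = {
 *		mc_patounum,
 *		mc_unumtopa
 *	};
 *
 *	cmi_mc_register(CPU, &mc_ops, mc_private);
 *
 * Once registered, cmi_mc_patounum() and cmi_mc_unumtopa() above route
 * physical-address/unum conversions to the driver's callbacks.
 */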