/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Public interface to routines implemented by CPU modules
 */

#include <sys/x86_archext.h>
#include <sys/cpu_module_impl.h>
#include <sys/fm/util.h>
#include <sys/reboot.h>
#include <sys/modctl.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/types.h>

#define	CPUMOD_SUBDIR	"cpu"
#define	CPUMOD_PREFIX	"cpu"

#define	CMI_OPS(cpu) \
	(cpu)->cpu_m.mcpu_cmi->cmi_ops
#define	CMI_DATA(cpu) \
	(cpu)->cpu_m.mcpu_cmidata

/*
 * If set for debugging we will not attempt to load a model-specific
 * cpu module but will load the generic cpu module instead.
 */
int cmi_force_generic = 0;

/*
 * If cleared for debugging, we will suppress panicking on fatal hardware
 * errors.  This should *only* be used for debugging; its use can and will
 * cause data corruption if actual hardware errors are detected by the system.
 */
int cmi_panic_on_uncorrectable_error = 1;

static cmi_t *cmi_list;
static kmutex_t cmi_load_lock;

static int
cmi_cpu_match(cpu_t *c1, cpu_t *c2)
{
	return (cpuid_getfamily(c1) == cpuid_getfamily(c2) &&
	    cpuid_getmodel(c1) == cpuid_getmodel(c2) &&
	    cpuid_getstep(c1) == cpuid_getstep(c2) &&
	    strcmp(cpuid_getvendorstr(c1), cpuid_getvendorstr(c2)) == 0);
}

static cmi_t *
cmi_load_modctl(modctl_t *modp)
{
	uintptr_t ops;
	cmi_t *cmi;

	ASSERT(MUTEX_HELD(&cmi_load_lock));

	for (cmi = cmi_list; cmi != NULL; cmi = cmi->cmi_next) {
		if (cmi->cmi_modp == modp)
			return (cmi);
	}

	if ((ops = modlookup_by_modctl(modp, "_cmi_ops")) == NULL) {
		cmn_err(CE_WARN, "CPU module %s is invalid: no _cmi_ops "
		    "found\n", modp->mod_modname);
		return (NULL);
	}

	/*
	 * Hold the module in memory.  We call into CPU modules without using
	 * the stubs mechanism, so these modules must be manually held in
	 * memory.  The mod_ref acts as if another loaded module has a
	 * dependency on us.
	 */
	mutex_enter(&mod_lock);
	modp->mod_ref++;
	mutex_exit(&mod_lock);

	cmi = kmem_zalloc(sizeof (cmi_t), KM_SLEEP);
	cmi->cmi_ops = (const cmi_ops_t *)ops;
	cmi->cmi_modp = modp;

	cmi->cmi_next = cmi_list;
	cmi_list = cmi;

	return (cmi);
}

static cmi_t *
cmi_load_module(cpu_t *cp)
{
	modctl_t *modp;
	cmi_t *cmi;
	int i, modid;
	uint_t s[3];

	/*
	 * Look to see if we've already got a module loaded for a CPU just
	 * like this one.
	 * If we do, then we'll re-use it.
	 */
	ASSERT(MUTEX_HELD(&cmi_load_lock));
	mutex_enter(&cpu_lock);

	for (i = 0; i < NCPU; i++) {
		cpu_t *cp2 = cpu[i];

		if (cp2 != NULL && cp2 != cp &&
		    cp2->cpu_m.mcpu_cmi != NULL && cmi_cpu_match(cp, cp2)) {
			mutex_exit(&cpu_lock);
			return (cp2->cpu_m.mcpu_cmi);
		}
	}

	mutex_exit(&cpu_lock);

	/*
	 * If we can't find a match, attempt to load the appropriate module.
	 * If that also fails, try to load the generic CPU module.
	 */
	s[0] = cpuid_getfamily(cp);
	s[1] = cpuid_getmodel(cp);
	s[2] = cpuid_getstep(cp);

	modid = modload_qualified(CPUMOD_SUBDIR, CPUMOD_PREFIX,
	    cpuid_getvendorstr(cp), ".", s, sizeof (s) / sizeof (s[0]));

	if (modid == -1)
		modid = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic");

	if (modid == -1)
		return (NULL);

	modp = mod_hold_by_id(modid);
	cmi = cmi_load_modctl(modp);
	mod_release_mod(modp);

	return (cmi);
}

static cmi_t *
cmi_load_generic(void)
{
	modctl_t *modp;
	cmi_t *cmi;
	int modid;

	if ((modid = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic")) == -1)
		return (NULL);

	modp = mod_hold_by_id(modid);
	cmi = cmi_load_modctl(modp);
	mod_release_mod(modp);

	return (cmi);
}

/*
 * Load a CPU module for the specified CPU, and then call its cmi_init routine.
 * If the module returns ENOTSUP, try using the generic CPU module instead.
 * If all else fails, we return -1 and the caller will panic or halt.
 */
int
cmi_load(cpu_t *cp)
{
	int err = ENOENT;
	cmi_t *cmi;
	void *data;

	mutex_enter(&cmi_load_lock);

	if (!cmi_force_generic && (
	    ((cmi = cmi_load_module(cp)) == NULL) ||
	    ((err = cmi->cmi_ops->cmi_init(cp, &data)) != 0 &&
	    err != ENOTSUP))) {
		cmn_err(CE_WARN, "CPU module %s failed to init CPU %d: err=%d",
		    cmi ? cmi->cmi_modp->mod_modname : "<>", cp->cpu_id, err);
		mutex_exit(&cmi_load_lock);
		return (-1);
	}

	if ((cmi_force_generic || err != 0) &&
	    ((cmi = cmi_load_generic()) == NULL ||
	    (err = cmi->cmi_ops->cmi_init(cp, &data)) != 0)) {
		cmn_err(CE_WARN, "CPU module %s failed to init CPU %d: err=%d",
		    cmi ? cmi->cmi_modp->mod_modname : "<>", cp->cpu_id, err);
		mutex_exit(&cmi_load_lock);
		return (-1);
	}

	ASSERT(cp->cpu_m.mcpu_cmi == NULL);
	cp->cpu_m.mcpu_cmi = cmi;
	cp->cpu_m.mcpu_cmidata = data;

	cmi->cmi_refcnt++;
	mutex_exit(&cmi_load_lock);

	if (boothowto & RB_VERBOSE) {
		printf("cpuid %d: initialized cpumod: %s\n",
		    cp->cpu_id, cmi->cmi_modp->mod_modname);
	}

	return (0);
}

void
cmi_init(void)
{
	if (cmi_load(CPU) < 0)
		panic("failed to load module for CPU %u", CPU->cpu_id);
}

void
cmi_post_init(void)
{
	CMI_OPS(CPU)->cmi_post_init(CMI_DATA(CPU));
}

/*
 * Called just once from start_other_cpus when all processors are started.
 * This will not be called for each cpu, so the registered op must not
 * assume it is called as such.
 */
void
cmi_post_mpstartup(void)
{
	CMI_OPS(CPU)->cmi_post_mpstartup(CMI_DATA(CPU));
}

void
cmi_faulted_enter(cpu_t *cp)
{
	CMI_OPS(cp)->cmi_faulted_enter(CMI_DATA(cp));
}

void
cmi_faulted_exit(cpu_t *cp)
{
	CMI_OPS(cp)->cmi_faulted_exit(CMI_DATA(cp));
}

int
cmi_scrubber_enable(cpu_t *cp, uint64_t base, uint64_t ilen, int cscontig)
{
	return (CMI_OPS(cp)->cmi_scrubber_enable(CMI_DATA(cp), base, ilen,
	    cscontig));
}

void
cmi_mca_init(void)
{
	CMI_OPS(CPU)->cmi_mca_init(CMI_DATA(CPU));
}

void
cmi_mca_trap(struct regs *rp)
{
	if (CMI_OPS(CPU)->cmi_mca_trap(CMI_DATA(CPU), rp)) {
		if (cmi_panic_on_uncorrectable_error)
			fm_panic("Unrecoverable Machine-Check Exception");
		else
			cmn_err(CE_WARN, "suppressing panic from fatal #mc");
	}
}

int
cmi_mca_inject(cmi_mca_regs_t *regs, uint_t nregs)
{
	int err;

	kpreempt_disable();
	err = CMI_OPS(CPU)->cmi_mca_inject(CMI_DATA(CPU), regs, nregs);
	kpreempt_enable();

	return (err);
}

void
cmi_mca_poke(void)
{
	CMI_OPS(CPU)->cmi_mca_poke(CMI_DATA(CPU));
}

void
cmi_mc_register(cpu_t *cp, const cmi_mc_ops_t *mcops, void *mcdata)
{
	CMI_OPS(cp)->cmi_mc_register(CMI_DATA(cp), mcops, mcdata);
}

int
cmi_mc_patounum(uint64_t pa, uint32_t synd, int syndtype, mc_unum_t *up)
{
	const struct cmi_mc_ops *mcops;
	cpu_t *cp = CPU;

	if (CMI_OPS(cp) == NULL ||
	    (mcops = CMI_OPS(cp)->cmi_mc_getops(CMI_DATA(cp))) == NULL)
		return (-1);	/* not registered yet */

	return (mcops->cmi_mc_patounum(CMI_DATA(cp), pa, synd, syndtype, up));
}

int
cmi_mc_unumtopa(mc_unum_t *up, nvlist_t *nvl, uint64_t *pap)
{
	const struct cmi_mc_ops *mcops;
	cpu_t *cp = CPU;

	if (up != NULL && nvl != NULL)
		return (-1);	/* only convert from one or the other form */

	if (CMI_OPS(cp) == NULL ||
	    (mcops = CMI_OPS(cp)->cmi_mc_getops(CMI_DATA(cp))) == NULL)
		return (-1);	/* not registered yet */

	return (mcops->cmi_mc_unumtopa(CMI_DATA(cp), up, nvl, pap));
}
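
/*
 * For reference, a rough sketch (not taken from any shipped module) of the
 * shape a CPU module is expected to have, inferred only from the call sites
 * in this file: the module exports a cmi_ops_t named _cmi_ops, which
 * cmi_load_modctl() locates via modlookup_by_modctl(), and whose cmi_init
 * entry is called as (cpu_t *, void **), with the returned cookie later
 * handed back as CMI_DATA().  The member names below are the ones used by
 * this file; the "xyz_" function names are placeholders, and the
 * authoritative declaration of cmi_ops_t is in cpu_module_impl.h.
 *
 *	static int
 *	xyz_init(cpu_t *cp, void **datap);
 *
 *	const cmi_ops_t _cmi_ops = {
 *		.cmi_init = xyz_init,
 *		.cmi_post_init = xyz_post_init,
 *		.cmi_post_mpstartup = xyz_post_mpstartup,
 *		.cmi_faulted_enter = xyz_faulted_enter,
 *		.cmi_faulted_exit = xyz_faulted_exit,
 *		.cmi_scrubber_enable = xyz_scrubber_enable,
 *		.cmi_mca_init = xyz_mca_init,
 *		.cmi_mca_trap = xyz_mca_trap,
 *		.cmi_mca_inject = xyz_mca_inject,
 *		.cmi_mca_poke = xyz_mca_poke,
 *		.cmi_mc_register = xyz_mc_register,
 *		.cmi_mc_getops = xyz_mc_getops
 *	};
 */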