/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2018, Joyent, Inc.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2020 RackTop Systems, Inc.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available. Code in this module should be the
 * absolute bare-bones support and must be cognizant of both Intel and AMD
 * processors.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>
#include <sys/x86_archext.h>

#include "gcpu.h"

/*
 * Set to nonzero to prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
#ifdef DEBUG
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif

#ifndef __xpv

/*
 * The purpose of this is to construct a unique identifier for a given
 * processor that can be used by things like FMA to determine when a FRU has
 * been replaced. It is supported on Intel Xeon platforms since Ivy Bridge
 * and on AMD 17h processors since Rome. See cpuid_pass1_ppin() for how we
 * determine if a CPU is supported.
 *
 * The protected processor inventory number (PPIN) can be used to create a
 * unique identifier when combined with the processor's cpuid signature. We
 * create a versioned, synthetic ID using the following scheme for the
 * identifier: iv0-<vendor>-<signature>-<PPIN>. The iv0 is the illumos
 * version zero of the ID. If we have a new scheme for a new generation of
 * processors, then that should rev the version field; otherwise, for a given
 * processor, this synthetic ID should not change.
 *
 * We use the string "INTC" for Intel and "AMD" for AMD. Neither these
 * strings nor the formatting of the values may change without changing the
 * version string.
 */
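/*
 * For illustration only (hypothetical values): an Intel processor with a
 * cpuid signature of 0x50654 and a PPIN of 0x1234567890abcdef would be
 * assigned the identifier "iv0-INTC-50654-1234567890abcdef".
 */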
static char *
gcpu_init_ident_ppin(cmi_hdl_t hdl)
{
	uint_t ppin_ctl_msr, ppin_msr;
	uint64_t value;
	const char *vendor;

	/*
	 * This list should be extended as new Intel Xeon family processors
	 * come out.
	 */
	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		ppin_ctl_msr = MSR_PPIN_CTL_INTC;
		ppin_msr = MSR_PPIN_INTC;
		vendor = "INTC";
		break;
	case X86_VENDOR_AMD:
		ppin_ctl_msr = MSR_PPIN_CTL_AMD;
		ppin_msr = MSR_PPIN_AMD;
		vendor = "AMD";
		break;
	default:
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * If the PPIN is not enabled and not locked, attempt to enable it.
	 * Note: in some environments such as Amazon EC2 the PPIN appears
	 * to be disabled and unlocked but our attempts to enable it don't
	 * stick, and when we attempt to read the PPIN we get an uncaught
	 * #GP. To avoid that happening we read the MSR back and verify it
	 * has taken the new value.
	 */
	if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((value & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}

		if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
			return (NULL);
		}

		if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, ppin_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read the data, lock the PPIN. Don't worry about
	 * success or failure of this part, as we will have gotten everything
	 * that we need. It is possible, for example, that it was locked in
	 * the enabled state.
	 */
	(void) cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_LOCKED);

	return (kmem_asprintf("iv0-%s-%x-%llx", vendor, cmi_hdl_chipsig(hdl),
	    value));
}
#endif /* __xpv */

static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
#ifdef DEBUG
	uint_t chipid;

	/*
	 * On debug, allow a developer to override the string to more
	 * easily test CPU autoreplace without needing to physically
	 * replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}
#endif

#ifndef __xpv
	if (is_x86_feature(x86_featureset, X86FSET_PPIN)) {
		sp->gcpus_ident = gcpu_init_ident_ppin(hdl);
	}
#endif /* __xpv */
}
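
/*
 * Illustrative note (DEBUG kernels only, hypothetical workflow): the
 * override above means CPU autoreplace can be exercised without swapping
 * hardware, e.g. by setting gcpu_id_disable or planting a string in
 * gcpu_id_override[] (say, via mdb -kw) before the chip's first cpu reaches
 * gcpu_init_ident().
 */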

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu. We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp != NULL) {
			/*
			 * A sibling core won the installation race; discard
			 * our copy and use the winner's.
			 */
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Undo the configuration performed in gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

	gcpu_mca_fini(hdl);

	/*
	 * Keep shared data in cache for reuse.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release the reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything
	 * be run on cpu 0, so we can assure that by starting from here.
	 */
	gcpu_mca_poll_start(hdl);
#else
	/*
	 * The boot CPU has a bit of a chicken-and-egg problem for CMCI. Its
	 * MCA initialization is run before we have initialized the PSM
	 * module that we would use for enabling CMCI. Therefore, we use this
	 * as a chance to enable CMCI for the boot CPU. For all other CPUs,
	 * this chicken-and-egg problem will have already been solved.
	 */
	gcpu_mca_cmci_enable(hdl);
#endif
}
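
/*
 * To summarize when polling begins: under the hypervisor (__xpv), polling
 * is started from gcpu_post_startup() above, pinned to cpu 0 for the virq
 * mechanism; on bare metal it is deferred to gcpu_post_mpstartup() below,
 * once every cpu handle has been initialized.
 */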
void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started,
	 * so we can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

/*
 * Return the synthetic identifier for this CPU's chip, if one was
 * constructed in gcpu_init_ident().
 */
const char *
gcpu_ident(cmi_hdl_t hdl)
{
	uint_t chipid;
	struct gcpu_chipshared *sp;

	if (gcpu_disable)
		return (NULL);

	chipid = cmi_hdl_chipid(hdl);
	if (chipid >= GCPU_MAX_CHIPID)
		return (NULL);

	if (cmi_hdl_getcmidata(hdl) == NULL)
		return (NULL);

	sp = gcpu_shared[chipid];
	return (sp->gcpus_ident);
}

/*
 * GCPU_OP selects between the native and the hypervisor implementation of
 * an operation; e.g. GCPU_OP(gcpu_mca_trap, NULL) expands to gcpu_mca_trap
 * on bare metal and to NULL under __xpv.
 */
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident				/* cmi_ident */
};

static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}