/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2018, Joyent, Inc.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2020 RackTop Systems, Inc.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be the
 * absolute bare-bones support and must be cognizant of Intel, AMD, and other
 * x86 vendors alike.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>
#include <sys/x86_archext.h>

#include "gcpu.h"

/*
 * Prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
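/*
 * In DEBUG kernels the synthetic chip identity may be suppressed entirely
 * (gcpu_id_disable) or overridden per chip (gcpu_id_override) so that CPU
 * autoreplace can be exercised without physically swapping a processor; see
 * gcpu_init_ident() below.
 */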
#ifdef	DEBUG
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif

#ifndef	__xpv

/*
 * The purpose of this is to construct a unique identifier for a given
 * processor that can be used by things like FMA to determine when a FRU has
 * been replaced. It is supported on Intel Xeon platforms since Ivy Bridge and
 * AMD 17h processors since Rome. See cpuid_pass1_ppin() for how we determine
 * if a CPU is supported.
 *
 * The protected processor inventory number (PPIN) can be used to create a
 * unique identifier when combined with the processor's cpuid signature. We
 * create a versioned, synthetic ID using the following scheme for the
 * identifier: iv0-<vendor>-<signature>-<PPIN>. The iv0 is the illumos version
 * zero of the ID. If we have a new scheme for a new generation of processors,
 * then that should rev the version field; otherwise, for a given processor,
 * this synthetic ID should not change.
 *
 * We use the string "INTC" for Intel and "AMD" for AMD. Neither these strings
 * nor the formatting of the values may change without changing the version
 * string.
 */
static char *
gcpu_init_ident_ppin(cmi_hdl_t hdl)
{
	uint_t ppin_ctl_msr, ppin_msr;
	uint64_t value;
	const char *vendor;

	/*
	 * This list should be extended as new Intel Xeon family processors come
	 * out.
	 */
	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		ppin_ctl_msr = MSR_PPIN_CTL_INTC;
		ppin_msr = MSR_PPIN_INTC;
		vendor = "INTC";
		break;
	case X86_VENDOR_AMD:
		ppin_ctl_msr = MSR_PPIN_CTL_AMD;
		ppin_msr = MSR_PPIN_AMD;
		vendor = "AMD";
		break;
	default:
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * If the PPIN is not enabled and not locked, attempt to enable it.
	 * Note: in some environments such as Amazon EC2 the PPIN appears
	 * to be disabled and unlocked but our attempts to enable it don't
	 * stick, and when we attempt to read the PPIN we get an uncaught
	 * #GP. To avoid that happening we read the MSR back and verify it
	 * has taken the new value.
	 */
	if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((value & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}

		if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
			return (NULL);
		}

		if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, ppin_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read data, lock the PPIN. Don't worry about success or
	 * failure of this part, as we will have gotten everything that we need.
	 * It is possible that it locked open, for example.
	 */
	if (cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_DISABLED) ==
	    CMI_SUCCESS) {
		(void) cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_LOCKED);
	}

	return (kmem_asprintf("iv0-%s-%x-%llx", vendor, cmi_hdl_chipsig(hdl),
	    value));
}
#endif	/* __xpv */

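/*
 * Establish the identity string for this chip.  In DEBUG kernels an override
 * may be used instead; otherwise we derive the string from the PPIN when the
 * processor supports it.
 */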
static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
#ifdef	DEBUG
	uint_t chipid;

	/*
	 * On debug, allow a developer to override the string to more
	 * easily test CPU autoreplace without needing to physically
	 * replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}
#endif

#ifndef __xpv
	if (is_x86_feature(x86_featureset, X86FSET_PPIN)) {
		sp->gcpus_ident = gcpu_init_ident_ppin(hdl);
	}
#endif	/* __xpv */
}

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Our cmi_fini entry point; undo the configuration performed by gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

	gcpu_mca_fini(hdl);

	/*
	 * Keep shared data in cache for reuse.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

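/*
 * Our cmi_post_startup entry point.  Under the hypervisor this is where MCA
 * polling begins; on bare metal we use it to enable CMCI, which for the boot
 * CPU could not be done during MCA initialization because the PSM module was
 * not yet available at that point.
 */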
void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything
	 * be run on cpu 0 so we can assure that by starting from here.
	 */
	gcpu_mca_poll_start(hdl);
#else
	/*
	 * The boot CPU has a bit of a chicken and egg problem for CMCI. Its MCA
	 * initialization is run before we have initialized the PSM module that
	 * we would use for enabling CMCI. Therefore, we use this as a chance to
	 * enable CMCI for the boot CPU. For all other CPUs, this chicken and
	 * egg problem will have already been solved.
	 */
	gcpu_mca_cmci_enable(hdl);
#endif
}

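/*
 * Our cmi_post_mpstartup entry point, called once all CPUs have been started.
 */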
void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started, so we
	 * can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

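/*
 * Our cmi_ident entry point; return the synthetic identity string for the
 * chip backing this handle, or NULL if none is available.
 */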
const char *
gcpu_ident(cmi_hdl_t hdl)
{
	uint_t chipid;
	struct gcpu_chipshared *sp;

	if (gcpu_disable)
		return (NULL);

	chipid = cmi_hdl_chipid(hdl);
	if (chipid >= GCPU_MAX_CHIPID)
		return (NULL);

	if (cmi_hdl_getcmidata(hdl) == NULL)
		return (NULL);

	sp = gcpu_shared[cmi_hdl_chipid(hdl)];
	return (sp->gcpus_ident);
}

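/*
 * GCPU_OP() selects the native or the Xen (__xpv) variant of an operation for
 * the cmi_ops table below, depending on how the module is built.
 */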
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident				/* cmi_ident */
};

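/*
 * Linkage structures that register this CPU module with the kernel.
 */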
static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

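/*
 * Standard loadable module entry points.
 */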
int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}