/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2018, Joyent, Inc.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be
 * absolute bare-bones support and must be cognizant of Intel, AMD, and
 * other x86 vendors alike.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>
#include <sys/x86_archext.h>

#include "gcpu.h"

/*
 * Set non-zero to prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
#ifdef	DEBUG
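/*
 * On DEBUG kernels these allow the synthetic chip identifier to be
 * suppressed (gcpu_id_disable) or overridden per chip (gcpu_id_override);
 * see gcpu_init_ident().
 */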
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif

#ifndef	__xpv

/*
 * The purpose of this is to construct a unique identifier for a given
 * processor that can be used by things like FMA to determine when a FRU has
 * been replaced. It is supported on Intel Xeon platforms since Ivy Bridge
 * and AMD 17h processors since Rome. See cpuid_pass1_ppin() for how we
 * determine if a CPU is supported.
 *
 * The protected processor inventory number (PPIN) can be used to create a
 * unique identifier when combined with the processor's cpuid signature. We
 * create a versioned, synthetic ID using the following scheme for the
 * identifier: iv0-<vendor>-<signature>-<PPIN>. The iv0 is version zero of
 * the illumos ID scheme. If we need a new scheme for a new generation of
 * processors, then that should rev the version field; otherwise, for a given
 * processor, this synthetic ID should not change.
 *
 * We use the string "INTC" for Intel and "AMD" for AMD. Neither these
 * strings nor the formatting of the values may change without revving the
 * version string.
 */
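/*
 * For illustration only (hypothetical values): an Intel part with a cpuid
 * signature of 0x50654 and a PPIN of 0x1234567890abcdef would yield the
 * identifier "iv0-INTC-50654-1234567890abcdef".
 */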
static char *
gcpu_init_ident_ppin(cmi_hdl_t hdl)
{
	uint_t ppin_ctl_msr, ppin_msr;
	uint64_t value;
	const char *vendor;

	/*
	 * This list should be extended as new Intel Xeon family processors
	 * come out.
	 */
	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		ppin_ctl_msr = MSR_PPIN_CTL_INTC;
		ppin_msr = MSR_PPIN_INTC;
		vendor = "INTC";
		break;
	case X86_VENDOR_AMD:
		ppin_ctl_msr = MSR_PPIN_CTL_AMD;
		ppin_msr = MSR_PPIN_AMD;
		vendor = "AMD";
		break;
	default:
		return (NULL);
	}

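	/*
	 * Read the control MSR first: the PPIN itself can only be read
	 * while the enable bit is set in PPIN_CTL.
	 */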
	if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

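	/*
	 * If the PPIN is not already enabled, try to enable it; if the
	 * control register has been locked with the PPIN disabled, we
	 * cannot, so give up.
	 */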
	if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((value & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, ppin_msr, &value) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read the data, lock the PPIN. We don't worry about
	 * the success or failure of this step, as we already have everything
	 * we need. It is possible, for example, that it was locked in the
	 * enabled state.
	 */
	(void) cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_LOCKED);

	return (kmem_asprintf("iv0-%s-%x-%llx", vendor, cmi_hdl_chipsig(hdl),
	    value));
}
#endif	/* __xpv */

static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
#ifdef	DEBUG
	uint_t chipid;

	/*
	 * On DEBUG kernels, allow a developer to override the identity
	 * string to more easily test CPU autoreplace without needing to
	 * physically replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}
#endif

#ifndef __xpv
	if (is_x86_feature(x86_featureset, X86FSET_PPIN)) {
		sp->gcpus_ident = gcpu_init_ident_ppin(hdl);
	}
#endif	/* __xpv */
}

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp != NULL) {
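			/*
			 * A sibling beat us to it; discard our copy and
			 * use the winner's structure instead.
			 */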
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

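	/*
	 * Count this cpu against the chip's active count and hook the
	 * shared state into our per-cpu data.
	 */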
	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Our cmi_fini entry point; undo the configuration performed by gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

	gcpu_mca_fini(hdl);

	/*
	 * The chipshared structure is deliberately left in place so that
	 * it can be reused if cpus on this chip are reconfigured; only
	 * the active count is dropped here.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

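	/*
	 * Let any model-specific (cms) support module do its own
	 * post-startup work.
	 */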
	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything be run
	 * on cpu 0, which we ensure by starting from here.
	 */
	gcpu_mca_poll_start(hdl);
#else
	/*
	 * The boot CPU has a bit of a chicken and egg problem for CMCI. Its
	 * MCA initialization is run before we have initialized the PSM module
	 * that we would use for enabling CMCI. Therefore, we use this as a
	 * chance to enable CMCI for the boot CPU. For all other CPUs, this
	 * chicken and egg problem will have already been solved.
	 */
	gcpu_mca_cmci_enable(hdl);
#endif
}

void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started, so
	 * we can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

const char *
gcpu_ident(cmi_hdl_t hdl)
{
	uint_t chipid;
	struct gcpu_chipshared *sp;

	if (gcpu_disable)
		return (NULL);

	chipid = cmi_hdl_chipid(hdl);
	if (chipid >= GCPU_MAX_CHIPID)
		return (NULL);

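	/*
	 * If gcpu_init() never ran for this handle, there is no shared
	 * state to consult.
	 */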
	if (cmi_hdl_getcmidata(hdl) == NULL)
		return (NULL);

	sp = gcpu_shared[chipid];
	return (sp->gcpus_ident);
}

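/*
 * GCPU_OP() selects between the native and the xVM (Xen paravirtualized)
 * implementation of an entry point, depending on the build.
 */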
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

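/*
 * Entry points exported to the cpu module interface (cmi) framework.
 * Under xVM the hypervisor fields the machine-check traps, so the
 * trap-related slots are NULL there and a panic callback is provided
 * instead.
 */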
const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident				/* cmi_ident */
};

static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

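/*
 * Standard loadable module entry points; all of the real work happens
 * through the cmi_ops vector above.
 */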
int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}