/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2018, Joyent, Inc.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be
 * the absolute bare-bones support and must be cognizant of Intel, AMD,
 * and other x86 CPU vendors.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>
#include <sys/x86_archext.h>

#include "gcpu.h"

/*
 * Set to a non-zero value to prevent the generic cpu support from loading.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32
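/* Per-chip shared state, indexed by chip id. */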
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
#ifdef	DEBUG
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif

#ifndef	__xpv
/*
 * This should probably be delegated to a CPU-specific module. However, as
 * those haven't been developed as actively for recent CPUs, we should
 * revisit this when we do have such a module and move this out of gcpu.
 *
 * This method is only supported on Intel Xeon platforms. It relies on a
 * combination of the PPIN and the cpuid signature; both are required to
 * form the synthetic ID. The ID is prefixed with iv0-INTC to indicate that
 * this is an Intel synthetic ID, giving the full form
 * iv0-INTC-<cpuid signature>-<PPIN>. The iv0 is the illumos version zero
 * of the ID scheme for Intel. If we adopt a new scheme for a new
 * generation of processors, then that should rev the version field;
 * otherwise, for a given processor, this synthetic ID should not change.
 * For more information on the PPIN and these MSRs, see the relevant
 * processor external design specification.
 */
static char *
gcpu_init_ident_intc(cmi_hdl_t hdl)
{
	uint64_t msr;

	/*
	 * This list should be extended as new Intel Xeon family processors
	 * come out.
	 */
	switch (cmi_hdl_model(hdl)) {
	case INTC_MODEL_IVYBRIDGE_XEON:
	case INTC_MODEL_HASWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON_D:
	case INTC_MODEL_SKYLAKE_XEON:
		break;
	default:
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PLATFORM_INFO, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

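	/*
	 * MSR_PLATFORM_INFO indicates whether the processor supports a PPIN
	 * at all; if not, there is nothing to build an identity from.
	 */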
	if ((msr & MSR_PLATFORM_INFO_PPIN) == 0) {
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN_CTL, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

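	/*
	 * If the PPIN is not already enabled, we may still be able to turn
	 * it on, provided the control MSR has not been locked in the
	 * disabled state.
	 */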
	if ((msr & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((msr & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read the data, lock the PPIN. Don't worry about the
	 * success or failure of this part, as we will have gotten everything
	 * that we need. It is possible that the register was already locked
	 * open, for example.
	 */
	(void) cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_LOCKED);

	return (kmem_asprintf("iv0-INTC-%x-%llx", cmi_hdl_chipsig(hdl), msr));
}
#endif	/* !__xpv */

static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
#ifdef	DEBUG
	uint_t chipid;

	/*
	 * On debug, allow a developer to override the string to more
	 * easily test CPU autoreplace without needing to physically
	 * replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}
#endif

#ifndef __xpv
	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		sp->gcpus_ident = gcpu_init_ident_intc(hdl);
		break;
	default:
		break;
	}
#endif	/* !__xpv */
}

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
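		/*
		 * If the compare-and-swap failed, a sibling core installed
		 * its structure first; discard ours and adopt the winner's.
		 */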
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Our cmi_fini entry point; undo the configuration established in
 * gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

	gcpu_mca_fini(hdl);

	/*
	 * The shared data is deliberately not freed; keep it cached for
	 * reuse should the chip be reconfigured later.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
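	/* Drop this cpu's count against the shared per-chip state. */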
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything
	 * be run on cpu 0, so we can ensure that by starting from here.
	 */
	gcpu_mca_poll_start(hdl);
#else
	/*
	 * The boot CPU has a bit of a chicken and egg problem for CMCI. Its
	 * MCA initialization is run before we have initialized the PSM
	 * module that we would use for enabling CMCI. Therefore, we use this
	 * as a chance to enable CMCI for the boot CPU. For all other CPUs,
	 * this chicken and egg problem will have already been solved.
	 */
	gcpu_mca_cmci_enable(hdl);
#endif
}

void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started,
	 * so we can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

const char *
gcpu_ident(cmi_hdl_t hdl)
{
	uint_t chipid;
	struct gcpu_chipshared *sp;

	if (gcpu_disable)
		return (NULL);

	chipid = cmi_hdl_chipid(hdl);
	if (chipid >= GCPU_MAX_CHIPID)
		return (NULL);

	if (cmi_hdl_getcmidata(hdl) == NULL)
		return (NULL);

	sp = gcpu_shared[chipid];
	return (sp->gcpus_ident);
}

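/*
 * GCPU_OP selects the native or xVM (Xen paravirtualized) implementation
 * of an operation at compile time.
 */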
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident				/* cmi_ident */
};

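/*
 * Standard kernel module linkage; modlcpu identifies this as a module of
 * class "cpu".
 */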
static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}