xref: /illumos-gate/usr/src/uts/intel/os/microcode.c (revision 42a10e597b973a5eba26215a58e4167c23772cf8)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2023 Oxide Computer Company
 */

#include <sys/bootconf.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/utsname.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/ontrap.h>
#include <sys/systeminfo.h>
#include <sys/systm.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>

/*
 * mcpu_ucode_info for the boot CPU.  Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;
static const ucode_source_t *ucode;
static char *ucodepath;
static kmutex_t ucode_lock;
static bool ucode_cleanup_done = false;

static const char ucode_failure_fmt[] =
	"cpu%d: failed to update microcode from version 0x%x to 0x%x";
static const char ucode_success_fmt[] =
	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

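/*
 * Each microcode source backend registers a ucode_source_t in this linker
 * set.  On the boot CPU, ucode_check() walks the set and adopts the first
 * entry whose us_select() accepts the CPU; that backend's operations
 * (us_capable, us_read_rev, us_validate, us_extract, us_locate, us_load,
 * us_file_reset, us_invalidate and us_write_msr) drive everything below.
 */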
SET_DECLARE(ucode_source_set, ucode_source_t);

/*
 * Force flag.  If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version.  Should only be used for debugging.
 */
int ucode_force_update = 0;

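/*
 * One-time initialization of the lock that protects the state above.
 */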
void
ucode_init(void)
{
	mutex_init(&ucode_lock, NULL, MUTEX_DEFAULT, NULL);
}

/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
	ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
	cp->cpu_m.mcpu_ucode_info = NULL;
}

const char *
ucode_path(void)
{
	ASSERT(ucodepath != NULL);
	return (ucodepath);
}

/*
 * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
 * allocated with BOP_ALLOC() and does not require a free.
 */
void *
ucode_zalloc(processorid_t id, size_t size)
{
	if (id != 0)
		return (kmem_zalloc(size, KM_NOSLEEP));

	/* BOP_ALLOC() failure results in panic */
	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
}

void
ucode_free(processorid_t id, void *buf, size_t size)
{
	if (id != 0 && buf != NULL)
		kmem_free(buf, size);
}

/*
 * Called to free up space allocated for the microcode file. This is called
 * from start_other_cpus() after an update attempt has been performed on all
 * CPUs.
 */
void
ucode_cleanup(void)
{
	mutex_enter(&ucode_lock);
	if (ucode != NULL)
		ucode->us_file_reset(-1);
	ucode_cleanup_done = true;
	mutex_exit(&ucode_lock);

	/*
	 * We purposefully do not free 'ucodepath' here so that it persists for
	 * any future callers to ucode_check(), such as could occur on systems
	 * that support DR.
	 */
}

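/*
 * Cross-call handler used by ucode_update().  It runs on each target CPU,
 * applies the previously extracted microcode image by writing its address
 * to the backend's trigger MSR, and then re-reads the running revision so
 * the caller can tell whether the update took effect.
 */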
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
	on_trap_data_t otd;

	ASSERT(ucode != NULL);
	ASSERT(uusp->ucodep != NULL);

	/*
	 * Check the running revision one more time: on a hyperthreaded
	 * processor the sibling thread shares the same microcode, so it may
	 * already have applied this update and made it unnecessary here.
	 */
	if (!ucode_force_update) {
		ucode->us_read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

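	/*
	 * Attempt the update under on_trap() protection so that a fault
	 * taken while triggering it via the MSR write is caught rather than
	 * taking the system down; the revision re-read below shows whether
	 * the update actually took hold.
	 */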
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		if (ucode->us_invalidate) {
			/*
			 * On some platforms a cache invalidation is required
			 * for the ucode update to be successful due to the
			 * parts of the processor that the microcode is
			 * updating.
			 */
			invalidate_cache();
		}
		wrmsr(ucode->us_write_msr, (uintptr_t)uusp->ucodep);
	}

	no_trap();
	ucode->us_read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}

/*
 * Entry points to microcode update from the 'ucode' driver.
 */
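/*
 * A minimal sketch of the expected calling sequence, with error handling
 * elided (the actual driver code lives elsewhere):
 *
 *	if (ucode_validate(buf, size) == EM_OK)
 *		rc = ucode_update(buf, size);
 */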

ucode_errno_t
ucode_validate(uint8_t *ucodep, int size)
{
	if (ucode == NULL)
		return (EM_NOTSUP);
	return (ucode->us_validate(ucodep, size));
}

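/*
 * Apply a microcode image supplied by the ucode driver to every xcall-ready
 * CPU.  The result of a successful us_extract() is cached so that sibling
 * CPUs sharing the same signature and platform id do not re-search the image.
 */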
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int		found = 0;
	ucode_update_t	cached = { 0 };
	ucode_update_t	*cachedp = NULL;
	ucode_errno_t	rc = EM_OK;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode != NULL);
	ASSERT(ucodep != NULL);
	CPUSET_ZERO(cpuset);

	if (!ucode->us_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (processorid_t id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY)) {
			continue;
		}

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->us_extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

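		/*
		 * Push the update out on the target CPU itself: a synchronous
		 * cross-call runs ucode_write() there, which performs the MSR
		 * write and refreshes uusp->new_rev for the checks below.
		 */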
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
		    !ucode_force_update) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found) {
		rc = search_rc;
	} else if (rc == EM_OK) {
		cpuid_post_ucodeadm();
	}

	return (rc);
}

/*
 * Entry point to microcode update from mlsetup() and mp_startup().
 * Initialize mcpu_ucode_info, and perform a microcode update if necessary.
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	bool bsp = (cp->cpu_id == 0);

	ASSERT(cp != NULL);

	mutex_enter(&ucode_lock);

	if (bsp) {
		/* Space statically allocated for BSP; ensure pointer is set */
		if (cp->cpu_m.mcpu_ucode_info == NULL)
			cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

		/* Select the microcode source backend if not already done */
		if (ucode == NULL) {
			ucode_source_t **src;

			SET_FOREACH(src, ucode_source_set) {
				if ((*src)->us_select(cp)) {
					ucode = *src;
					break;
				}
			}

			if (ucode == NULL)
				goto out;

#ifdef DEBUG
			cmn_err(CE_CONT, "?ucode: selected %s\n",
			    ucode->us_name);
#endif
		}
	}

	if (ucode == NULL)
		goto out;

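	/*
	 * Construct the directory from which microcode files are loaded,
	 * "/platform/<arch>/ucode", the first time through.
	 */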
	if (ucodepath == NULL) {
		size_t sz;
		char *plat;

		if (bsp) {
			const char *prop = "impl-arch-name";
			int len;

			len = BOP_GETPROPLEN(bootops, prop);

			if (len <= 0) {
				cmn_err(CE_WARN,
				    "ucode: could not find %s property", prop);
				goto out;
			}

			/*
			 * On the BSP, this memory is allocated via BOP_ALLOC()
			 * -- which panics on failure -- and does not need to
			 * be explicitly freed.
			 */
			plat = ucode_zalloc(cp->cpu_id, len + 1);
			(void) BOP_GETPROP(bootops, prop, plat);
		} else {
			/*
			 * 'platform' comes from common/conf/param.c and has
			 * already been filled in by setup_ddi() by this point.
			 */
			plat = platform;
		}
		if (plat[0] == '\0') {
			/*
			 * If we can't determine the architecture name,
			 * we cannot find microcode files for it.
			 * Return without setting 'ucodepath'.
			 */
			cmn_err(CE_WARN, "ucode: could not determine arch");
			goto out;
		}

		sz = snprintf(NULL, 0, "/platform/%s/ucode", plat) + 1;
		/*
		 * Note that on the boot CPU, this allocation will be satisfied
		 * via BOP_ALLOC() and the returned address will not be valid
		 * once we come back into this function for the remaining CPUs.
		 * To deal with this, we throw the memory away at the end of
		 * this function if we are the BSP. The next CPU through here
		 * will re-create it using kmem and then it persists.
		 */
		ucodepath = ucode_zalloc(cp->cpu_id, sz);
		if (ucodepath == NULL) {
			cmn_err(CE_WARN,
			    "ucode: could not allocate memory for path");
			goto out;
		}
		(void) snprintf(ucodepath, sz, "/platform/%s/ucode", plat);
	}

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop != NULL);

	if (!ucode->us_capable(cp))
		goto out;

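	/* Record the microcode revision the CPU is currently running. */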
	ucode->us_read_rev(uinfop);

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode->us_locate(cp, uinfop)) == EM_OK) {
		uint32_t old_rev, new_rev;

		old_rev = uinfop->cui_rev;
		new_rev = ucode->us_load(uinfop);

		if (uinfop->cui_rev != new_rev) {
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    old_rev, new_rev);
		} else {
			cmn_err(CE_CONT, ucode_success_fmt, cp->cpu_id,
			    old_rev, new_rev);
		}
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 *
	 * In case we end up here after ucode_cleanup() has been called, such
	 * as could occur with CPU hotplug, we also clear the memory and reset
	 * the data structure as nothing else will call ucode_cleanup() and we
	 * don't need to cache the data as we do during boot when starting the
	 * APs.
	 */
	if (rc != EM_OK || bsp || ucode_cleanup_done)
		ucode->us_file_reset(cp->cpu_id);

out:
	/*
	 * If this is the boot CPU, discard the memory that came from BOP_ALLOC
	 * and was used to build the ucode path.
	 */
	if (bsp)
		ucodepath = NULL;

	mutex_exit(&ucode_lock);
}

/*
 * Returns the microcode revision cached in the machcpu structure for each
 * CPU.  'revp' must point to an array of at least max_ncpus entries; entries
 * for CPUs that are not present are left untouched.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(revp != NULL);

	if (ucode == NULL || !ucode->us_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}