xref: /illumos-gate/usr/src/uts/intel/os/microcode.c (revision d39975c637b3d0cf582256e9d3084dc02c3ab68f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
27  * Copyright (c) 2018, Joyent, Inc.
28  * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
29  * Copyright 2025 Oxide Computer Company
30  */
31 
32 #include <sys/bootconf.h>
33 #include <sys/cmn_err.h>
34 #include <sys/controlregs.h>
35 #include <sys/utsname.h>
36 #include <sys/debug.h>
37 #include <sys/kobj.h>
38 #include <sys/kobj_impl.h>
39 #include <sys/ontrap.h>
40 #include <sys/systeminfo.h>
41 #include <sys/systm.h>
42 #include <sys/ucode.h>
43 #include <sys/x86_archext.h>
44 #include <sys/x_call.h>
45 
46 /*
47  * mcpu_ucode_info for the boot CPU.  Statically allocated.
48  */
49 static struct cpu_ucode_info cpu_ucode_info0;
50 static const ucode_source_t *ucode;
51 static char *ucodepath;
52 static kmutex_t ucode_lock;
53 static bool ucode_cleanup_done = false;
54 
55 /*
56  * Flag for use by microcode impls to determine if they can use kmem.  Note this
57  * is meant primarily for gating use of functions like kobj_open_file() which
58  * allocate internally with kmem.  ucode_zalloc() and ucode_free() should
59  * otherwise be used.
60  */
61 bool ucode_use_kmem = false;
62 
63 static const char ucode_failure_fmt[] =
64 	"cpu%d: failed to update microcode from version 0x%x to 0x%x";
65 static const char ucode_success_fmt[] =
66 	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
67 
68 static const char ucode_path_fmt[] = "/platform/%s/ucode";
69 
70 SET_DECLARE(ucode_source_set, ucode_source_t);
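/*
 * Each microcode backend adds itself to the ucode_source_set linker set and
 * is then considered by ucode_init() below, which keeps the first source
 * whose us_select() callback claims the boot CPU.  A minimal registration
 * sketch, assuming the SET_ENTRY() macro from <sys/linker_set.h> and using
 * purely illustrative names for the backend and its callbacks (the real
 * backends live in their own source files; fields such as us_write_msr and
 * us_invalidate, used later in this file, would be filled in as appropriate):
 *
 *	static const ucode_source_t ucode_example = {
 *		.us_name	= "example",
 *		.us_select	= ucode_example_select,
 *		.us_capable	= ucode_example_capable,
 *		.us_read_rev	= ucode_example_read_rev,
 *		.us_load	= ucode_example_load,
 *		.us_validate	= ucode_example_validate,
 *		.us_extract	= ucode_example_extract,
 *		.us_locate	= ucode_example_locate,
 *		.us_file_reset	= ucode_example_file_reset,
 *	};
 *	SET_ENTRY(ucode_source_set, ucode_example);
 */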
71 
72 /*
73  * Force flag.  If set, the first microcode binary that matches
74  * signature and platform id will be used for microcode update,
75  * regardless of version.  Should only be used for debugging.
76  */
77 int ucode_force_update = 0;
78 
79 void
80 ucode_init(void)
81 {
82 	ucode_source_t **src;
83 
84 	mutex_init(&ucode_lock, NULL, MUTEX_DEFAULT, NULL);
85 
86 	/* Set up function pointers */
87 	SET_FOREACH(src, ucode_source_set) {
88 		if ((*src)->us_select(CPU)) {
89 			ucode = *src;
90 			break;
91 		}
92 	}
93 
94 	if (ucode == NULL)
95 		return;
96 
97 #ifdef DEBUG
98 	cmn_err(CE_CONT, "?ucode: selected %s\n", ucode->us_name);
99 
100 	if (!ucode->us_capable(CPU)) {
101 		cmn_err(CE_CONT,
102 		    "?ucode: microcode update not supported on CPU\n");
103 		return;
104 	}
105 #endif
106 }
107 
108 /*
109  * Allocate space for mcpu_ucode_info in the machcpu structure
110  * for all non-boot CPUs.
111  */
112 void
113 ucode_alloc_space(cpu_t *cp)
114 {
115 	ASSERT(cp->cpu_id != 0);
116 	ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
117 	cp->cpu_m.mcpu_ucode_info =
118 	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
119 }
120 
121 void
122 ucode_free_space(cpu_t *cp)
123 {
124 	ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
125 	ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
126 	kmem_free(cp->cpu_m.mcpu_ucode_info,
127 	    sizeof (*cp->cpu_m.mcpu_ucode_info));
128 	cp->cpu_m.mcpu_ucode_info = NULL;
129 }
130 
131 const char *
132 ucode_path(void)
133 {
134 	ASSERT(ucodepath != NULL);
135 	return (ucodepath);
136 }
137 
138 /*
139  * Allocate/free a buffer used to hold ucode data. Space allocated before kmem
140  * is available comes from BOP_ALLOC() and does not require a free.
141  */
142 void *
143 ucode_zalloc(size_t size)
144 {
145 	if (ucode_use_kmem)
146 		return (kmem_zalloc(size, KM_NOSLEEP));
147 
148 	/* BOP_ALLOC() failure results in panic */
149 	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
150 }
151 
152 void
153 ucode_free(void *buf, size_t size)
154 {
155 	if (ucode_use_kmem && buf != NULL)
156 		kmem_free(buf, size);
157 }
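/*
 * ucode_zalloc() and ucode_free() are expected to be called with matching
 * sizes.  A minimal usage sketch (hypothetical buffer and size):
 *
 *	size_t sz = len;		(size of the data being staged)
 *	void *buf = ucode_zalloc(sz);
 *	if (buf == NULL)		(only possible once kmem is in use)
 *		return;
 *	...
 *	ucode_free(buf, sz);
 *
 * Before kmem is available (ucode_use_kmem == false) the memory comes from
 * BOP_ALLOC(), which panics rather than fail, and ucode_free() deliberately
 * does nothing for it, so the same call pattern is safe in both phases.
 */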
158 
159 /*
160  * Called to free up space allocated for the microcode file. This is called
161  * from start_other_cpus() after an update attempt has been performed on all
162  * CPUs.
163  */
164 void
165 ucode_cleanup(void)
166 {
167 	mutex_enter(&ucode_lock);
168 	if (ucode != NULL)
169 		ucode->us_file_reset();
170 	ucode_cleanup_done = true;
171 	mutex_exit(&ucode_lock);
172 
173 	/*
174 	 * We purposefully do not free 'ucodepath' here so that it persists for
175 	 * any future callers to ucode_locate(), such as could occur on systems
176 	 * that support DR.
177 	 */
178 }
179 
180 static int
181 ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
182 {
183 	ucode_update_t *uusp = (ucode_update_t *)arg1;
184 	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
185 	on_trap_data_t otd;
186 
187 	ASSERT(ucode != NULL);
188 	ASSERT(uusp->ucodep != NULL);
189 
190 	/*
191 	 * Check one more time to see if it is really necessary to update
192 	 * microcode just in case this is a hyperthreaded processor where
193 	 * the threads share the same microcode.
194 	 */
195 	if (!ucode_force_update) {
196 		ucode->us_read_rev(uinfop);
197 		uusp->new_rev = uinfop->cui_rev;
198 		if (uinfop->cui_rev >= uusp->expected_rev)
199 			return (0);
200 	}
201 
202 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
203 		if (ucode->us_invalidate) {
204 			/*
205 			 * On some platforms a cache invalidation is required
206 			 * for the ucode update to be successful due to the
207 			 * parts of the processor that the microcode is
208 			 * updating.
209 			 */
210 			invalidate_cache();
211 		}
212 		wrmsr(ucode->us_write_msr, (uintptr_t)uusp->ucodep);
213 	}
214 
215 	no_trap();
216 	ucode->us_read_rev(uinfop);
217 	uusp->new_rev = uinfop->cui_rev;
218 
219 	return (0);
220 }
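/*
 * ucode_write() runs on the CPU being updated; ucode_update() below
 * dispatches it with a single-CPU cross call, roughly:
 *
 *	CPUSET_ADD(cpuset, id);
 *	xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
 *	CPUSET_DEL(cpuset, id);
 *
 * The on_trap(OT_DATA_ACCESS) bracket around the WRMSR means that a faulting
 * or rejected update is caught and the outcome is judged by re-reading the
 * revision and comparing it against the expected one, rather than by taking
 * down the system.
 */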
221 
222 /*
223  * Entry points to microcode update from the 'ucode' driver.
224  */
225 
226 ucode_errno_t
227 ucode_validate(uint8_t *ucodep, int size)
228 {
229 	if (ucode == NULL)
230 		return (EM_NOTSUP);
231 	return (ucode->us_validate(ucodep, size));
232 }
233 
234 ucode_errno_t
235 ucode_update(uint8_t *ucodep, int size)
236 {
237 	int		found = 0;
238 	ucode_update_t	cached = { 0 };
239 	ucode_update_t	*cachedp = NULL;
240 	ucode_errno_t	rc = EM_OK;
241 	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
242 	cpuset_t cpuset;
243 
244 	ASSERT(ucode != NULL);
245 	ASSERT(ucodep != NULL);
246 	CPUSET_ZERO(cpuset);
247 
248 	if (!ucode->us_capable(CPU))
249 		return (EM_NOTSUP);
250 
251 	mutex_enter(&cpu_lock);
252 
253 	for (processorid_t id = 0; id < max_ncpus; id++) {
254 		cpu_t *cpu;
255 		ucode_update_t uus = { 0 };
256 		ucode_update_t *uusp = &uus;
257 
258 		/*
259 		 * If there is no such CPU or it is not xcall ready, skip it.
260 		 */
261 		if ((cpu = cpu_get(id)) == NULL ||
262 		    !(cpu->cpu_flags & CPU_READY)) {
263 			continue;
264 		}
265 
266 		uusp->sig = cpuid_getsig(cpu);
267 		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
268 		    sizeof (uusp->info));
269 
270 		/*
271 		 * If the current CPU has the same signature and platform
272 		 * id as the previous one we processed, reuse the information.
273 		 */
274 		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
275 		    cachedp->info.cui_platid == uusp->info.cui_platid) {
276 			uusp->ucodep = cachedp->ucodep;
277 			uusp->expected_rev = cachedp->expected_rev;
278 			/*
279 			 * Intuitively we should check here to see whether the
280 			 * running microcode rev is >= the expected rev, and
281 			 * quit if it is.  But we choose to proceed with the
282 			 * xcall regardless of the running version so that
283 			 * the other threads in an HT processor can update
284 			 * the cpu_ucode_info structure in machcpu.
285 			 */
286 		} else if ((search_rc = ucode->us_extract(uusp, ucodep, size))
287 		    == EM_OK) {
288 			bcopy(uusp, &cached, sizeof (cached));
289 			cachedp = &cached;
290 			found = 1;
291 		}
292 
293 		/* Nothing to do */
294 		if (uusp->ucodep == NULL)
295 			continue;
296 
297 		CPUSET_ADD(cpuset, id);
298 		kpreempt_disable();
299 		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
300 		kpreempt_enable();
301 		CPUSET_DEL(cpuset, id);
302 
303 		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
304 		    !ucode_force_update) {
305 			rc = EM_HIGHERREV;
306 		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
307 		    uusp->expected_rev != uusp->new_rev)) {
308 			cmn_err(CE_WARN, ucode_failure_fmt,
309 			    id, uusp->info.cui_rev, uusp->expected_rev);
310 			rc = EM_UPDATE;
311 		} else {
312 			cmn_err(CE_CONT, ucode_success_fmt,
313 			    id, uusp->info.cui_rev, uusp->new_rev);
314 		}
315 	}
316 
317 	mutex_exit(&cpu_lock);
318 
319 	if (!found) {
320 		rc = search_rc;
321 	} else if (rc == EM_OK) {
322 		cpuid_post_ucodeadm();
323 	}
324 
325 	return (rc);
326 }
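/*
 * A consumer with a complete microcode image in memory (such as the 'ucode'
 * driver named above) would be expected to validate it before requesting an
 * update, along these lines (sketch only, error handling elided):
 *
 *	ucode_errno_t rc;
 *
 *	rc = ucode_validate(buf, size);
 *	if (rc == EM_OK)
 *		rc = ucode_update(buf, size);
 *
 * ucode_update() then walks every xcall-ready CPU, extracts the portion of
 * the image matching that CPU's signature and platform id, and cross calls
 * ucode_write() on it.
 */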
327 
328 /*
329  * Called when starting up non-boot CPUs from mp_startup() to read the current
330  * microcode revision before the control CPU calls ucode_locate().
331  */
332 void
333 ucode_read_rev(cpu_t *cp)
334 {
335 	cpu_ucode_info_t *uinfop;
336 
337 	ASSERT3P(cp, !=, NULL);
338 
339 	if (ucode == NULL || !ucode->us_capable(cp))
340 		return;
341 
342 	uinfop = cp->cpu_m.mcpu_ucode_info;
343 	ASSERT3P(uinfop, !=, NULL);
344 
345 	ucode->us_read_rev(uinfop);
346 }
347 
348 /*
349  * Called by the control CPU when starting up non-boot CPUs to find any
350  * applicable microcode updates. If one is found, it is recorded in
351  * mcpu_ucode_info so that it can subsequently be applied via ucode_apply().
352  * ucode_read_rev() must be called before this function on the target CPU.
353  */
354 void
355 ucode_locate(cpu_t *cp)
356 {
357 	cpu_ucode_info_t *uinfop;
358 	ucode_errno_t rc;
359 	size_t sz;
360 
361 	ASSERT3P(cp, !=, NULL);
362 	ASSERT(ucode_use_kmem);
363 
364 	mutex_enter(&ucode_lock);
365 
366 	if (ucode == NULL || !ucode->us_capable(cp))
367 		goto out;
368 
369 	if (ucodepath == NULL) {
370 		sz = snprintf(NULL, 0, ucode_path_fmt, platform) + 1;
371 		ucodepath = kmem_zalloc(sz, KM_NOSLEEP);
372 		if (ucodepath == NULL) {
373 			cmn_err(CE_WARN,
374 			    "ucode: could not allocate memory for path");
375 			goto out;
376 		}
377 		(void) snprintf(ucodepath, sz, ucode_path_fmt, platform);
378 	}
379 
380 	uinfop = cp->cpu_m.mcpu_ucode_info;
381 	ASSERT3P(uinfop, !=, NULL);
382 
383 	/*
384 	 * Search for any applicable updates.
385 	 *
386 	 * A return value of EM_HIGHERREV indicates that no update was applied
387 	 * due to the CPU already being at that or a higher revision, but both
388 	 * EM_HIGHERREV and EM_OK indicate that some microcode that matches the
389 	 * CPU was successfully located. In either of these cases it's worth
390 	 * keeping it around in case it's useful for the next CPU -- and if it
391 	 * isn't it will end up being discarded. In all other cases we clear it
392 	 * out just in case we have read in a partial or invalid file.
393 	 *
394 	 * Architectural note:
395 	 *   Depending on the platform, the cpu_t being processed may represent
396 	 *   a thread within a CPU core. If updating one thread's microcode
397 	 *   implicitly updates all sibling threads in the core, it's normal to
398 	 *   see a mix of EM_OK and EM_HIGHERREV when iterating over those
399 	 *   threads.
400 	 *
401 	 * There's one additional consideration. If we are here after
402 	 * ucode_cleanup() has been called, such as could occur with CPU
403 	 * hotplug, we also clear the memory and reset the data structure as
404 	 * nothing else will call ucode_cleanup() and we don't need to cache
405 	 * the data as we do during boot when starting the APs.
406 	 */
407 	rc = ucode->us_locate(cp, uinfop);
408 	if ((rc != EM_OK && rc != EM_HIGHERREV) || ucode_cleanup_done)
409 		ucode->us_file_reset();
410 
411 out:
412 	mutex_exit(&ucode_lock);
413 }
414 
415 /*
416  * Called when starting up non-boot CPUs to load any pending microcode updates
417  * found in ucode_locate().  Note this is called very early in the startup
418  * process (before CPU_READY is set and while CPU_QUIESCED is) so we must be
419  * careful about what we do here, e.g., no kmem_free or anything that might call
420  * hat_unload; no kmem_alloc or anything which may cause thread context switch.
421  * We also don't take the ucode_lock here for similar reasons (if contended
422  * the idle thread will spin with CPU_QUIESCED set). This is fine though since
423  * we should not be updating any shared ucode state.
424  */
425 void
426 ucode_apply(cpu_t *cp)
427 {
428 	cpu_ucode_info_t *uinfop;
429 
430 	ASSERT3P(cp, !=, NULL);
431 
432 	if (ucode == NULL || !ucode->us_capable(cp))
433 		return;
434 
435 	uinfop = cp->cpu_m.mcpu_ucode_info;
436 	ASSERT3P(uinfop, !=, NULL);
437 
438 	/*
439 	 * No pending update -- nothing to do.
440 	 */
441 	if (uinfop->cui_pending_ucode == NULL)
442 		return;
443 
444 	/*
445 	 * Apply pending update.
446 	 */
447 	ucode->us_load(uinfop);
448 }
449 
450 /*
451  * Called when starting up non-boot CPUs to free any pending microcode updates
452  * found in ucode_locate() and print the result of the attempt to load it in
453  * ucode_apply().  This is separate from ucode_apply() as we can't yet call
454  * kmem_free() at that point in the startup process.
455  */
456 void
457 ucode_finish(cpu_t *cp)
458 {
459 	cpu_ucode_info_t *uinfop;
460 	uint32_t old_rev, new_rev;
461 
462 	ASSERT3P(cp, !=, NULL);
463 
464 	if (ucode == NULL || !ucode->us_capable(cp))
465 		return;
466 
467 	uinfop = cp->cpu_m.mcpu_ucode_info;
468 	ASSERT3P(uinfop, !=, NULL);
469 
470 	/*
471 	 * No pending update -- nothing to do.
472 	 */
473 	if (uinfop->cui_pending_ucode == NULL)
474 		return;
475 
476 	old_rev = uinfop->cui_rev;
477 	new_rev = uinfop->cui_pending_rev;
478 	ucode->us_read_rev(uinfop);
479 
480 	if (uinfop->cui_rev != new_rev) {
481 		ASSERT3U(uinfop->cui_rev, ==, old_rev);
482 		cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id, old_rev,
483 		    new_rev);
484 	} else {
485 		cmn_err(CE_CONT, ucode_success_fmt, cp->cpu_id, old_rev,
486 		    new_rev);
487 	}
488 
489 	ucode_free(uinfop->cui_pending_ucode, uinfop->cui_pending_size);
490 	uinfop->cui_pending_ucode = NULL;
491 	uinfop->cui_pending_size = 0;
492 	uinfop->cui_pending_rev = 0;
493 }
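/*
 * Taken together, the routines above give the expected per-CPU sequence when
 * bringing up a non-boot CPU (sketch only; the caller locations are as
 * documented on each function):
 *
 *	ucode_alloc_space(cp);		allocate mcpu_ucode_info
 *	ucode_read_rev(cp);		record the running revision
 *	ucode_locate(cp);		stage a matching update, if any
 *	ucode_apply(cp);		load the staged update
 *	ucode_finish(cp);		report the result and free the staging
 *
 * with ucode_cleanup() called once from start_other_cpus() after an update
 * attempt has been made on all CPUs.
 */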
494 
495 /*
496  * Entry point to microcode update from mlsetup() for boot CPU.
497  * Initialize mcpu_ucode_info, and perform microcode update if necessary.
498  * cpuid_info must be initialized before we can be called.
499  */
500 void
501 ucode_check_boot(void)
502 {
503 	cpu_t *cp = CPU;
504 	cpu_ucode_info_t *uinfop;
505 	const char *prop;
506 	char *plat;
507 	int prop_len;
508 	size_t path_len;
509 
510 	ASSERT3U(cp->cpu_id, ==, 0);
511 	ASSERT(!ucode_use_kmem);
512 
513 	mutex_enter(&ucode_lock);
514 
515 	/* Space statically allocated for BSP; ensure pointer is set */
516 	ASSERT3P(cp->cpu_m.mcpu_ucode_info, ==, NULL);
517 	uinfop = cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;
518 
519 	if (ucode == NULL || !ucode->us_capable(cp))
520 		goto out;
521 
522 	ASSERT3P(ucodepath, ==, NULL);
523 
524 	prop = "impl-arch-name";
525 	prop_len = BOP_GETPROPLEN(bootops, prop);
526 	if (prop_len <= 0) {
527 		cmn_err(CE_WARN, "ucode: could not find %s property", prop);
528 		goto out;
529 	}
530 
531 	/*
532 	 * We're running on the boot CPU before kmem is available so we make use
533 	 * of BOP_ALLOC() -- which panics on failure -- to allocate any memory
534  * we need.  That also means we don't need to explicitly free it.
535 	 */
536 	plat = BOP_ALLOC(bootops, NULL, prop_len + 1, MMU_PAGESIZE);
537 	(void) BOP_GETPROP(bootops, prop, plat);
538 	if (plat[0] == '\0') {
539 		/*
540 		 * If we can't determine the architecture name,
541 		 * we cannot find microcode files for it.
542 		 * Return without setting 'ucodepath'.
543 		 */
544 		cmn_err(CE_WARN, "ucode: could not determine arch");
545 		goto out;
546 	}
547 
548 	path_len = snprintf(NULL, 0, ucode_path_fmt, plat) + 1;
549 	ucodepath = BOP_ALLOC(bootops, NULL, path_len, MMU_PAGESIZE);
550 	(void) snprintf(ucodepath, path_len, ucode_path_fmt, plat);
551 
552 	/*
553 	 * Check to see if we need ucode update
554 	 */
555 	ucode->us_read_rev(uinfop);
556 	if (ucode->us_locate(cp, uinfop) == EM_OK) {
557 		uint32_t old_rev, new_rev;
558 
559 		old_rev = uinfop->cui_rev;
560 		new_rev = uinfop->cui_pending_rev;
561 		ucode->us_load(uinfop);
562 		ucode->us_read_rev(uinfop);
563 
564 		if (uinfop->cui_rev != new_rev) {
565 			ASSERT3U(uinfop->cui_rev, ==, old_rev);
566 			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
567 			    old_rev, new_rev);
568 		} else {
569 			cmn_err(CE_CONT, ucode_success_fmt, cp->cpu_id,
570 			    old_rev, new_rev);
571 		}
572 	}
573 
574 	/*
575 	 * Regardless of whether we found a match or not, since the scratch
576 	 * memory for holding the microcode for the boot CPU came from
577 	 * BOP_ALLOC, we will reset the data structure as if we never did the
578 	 * allocation so we don't have to keep track of this special chunk of
579 	 * memory.
580 	 */
581 	ucode->us_file_reset();
582 
583 	/*
584 	 * Similarly clear any pending update that may have been found.
585 	 */
586 	uinfop->cui_pending_ucode = NULL;
587 	uinfop->cui_pending_size = 0;
588 	uinfop->cui_pending_rev = 0;
589 
590 out:
591 	/*
592 	 * Discard the memory that came from BOP_ALLOC and was used to build the
593 	 * ucode path.  Subsequent CPUs will be handled via ucode_locate() at
594 	 * which point kmem is available and we can cache the path.
595 	 */
596 	ucodepath = NULL;
597 	ucode_use_kmem = true;
598 
599 	mutex_exit(&ucode_lock);
600 }
601 
602 /*
603  * Returns microcode revision from the machcpu structure.
604  */
605 ucode_errno_t
606 ucode_get_rev(uint32_t *revp)
607 {
608 	int i;
609 
610 	ASSERT(revp != NULL);
611 
612 	if (ucode == NULL || !ucode->us_capable(CPU))
613 		return (EM_NOTSUP);
614 
615 	mutex_enter(&cpu_lock);
616 	for (i = 0; i < max_ncpus; i++) {
617 		cpu_t *cpu;
618 
619 		if ((cpu = cpu_get(i)) == NULL)
620 			continue;
621 
622 		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
623 	}
624 	mutex_exit(&cpu_lock);
625 
626 	return (EM_OK);
627 }
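/*
 * The revp argument must point to an array of at least max_ncpus uint32_t
 * entries, indexed by CPU id; slots for absent CPUs are left untouched.
 * A caller sketch (hypothetical, kernel context):
 *
 *	uint32_t *revs = kmem_zalloc(max_ncpus * sizeof (uint32_t), KM_SLEEP);
 *
 *	if (ucode_get_rev(revs) == EM_OK) {
 *		...use revs[]...
 *	}
 *	kmem_free(revs, max_ncpus * sizeof (uint32_t));
 */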
628