/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2025 Oxide Computer Company
 */

#include <sys/bootconf.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/utsname.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/ontrap.h>
#include <sys/stdbool.h>
#include <sys/systeminfo.h>
#include <sys/systm.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>

/*
 * mcpu_ucode_info for the boot CPU. Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;
static const ucode_source_t *ucode;
static char *ucodepath;
static kmutex_t ucode_lock;
static bool ucode_cleanup_done = false;

/*
 * Flag for use by microcode impls to determine if they can use kmem. Note this
 * is meant primarily for gating use of functions like kobj_open_file() which
 * allocate internally with kmem. ucode_zalloc() and ucode_free() should
 * otherwise be used.
 */
bool ucode_use_kmem = false;

static const char ucode_failure_fmt[] =
    "cpu%d: failed to update microcode from version 0x%x to 0x%x";
static const char ucode_success_fmt[] =
    "?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

static const char ucode_path_fmt[] = "/platform/%s/ucode";

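/*
 * Linker set of the available microcode source implementations (e.g. the
 * Intel and AMD backends); ucode_init() walks this set and selects the first
 * entry that claims the boot CPU.
 */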
SET_DECLARE(ucode_source_set, ucode_source_t);

/*
 * Force flag. If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version. Should only be used for debugging.
 */
int ucode_force_update = 0;

void
ucode_init(void)
{
	ucode_source_t **src;

	mutex_init(&ucode_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Set up function pointers */
	SET_FOREACH(src, ucode_source_set) {
		if ((*src)->us_select(CPU)) {
			ucode = *src;
			break;
		}
	}

	if (ucode == NULL)
		return;

#ifdef DEBUG
	cmn_err(CE_CONT, "?ucode: selected %s\n", ucode->us_name);
#endif

	if (!ucode->us_capable(CPU)) {
		cmn_err(CE_CONT,
		    "?ucode: microcode update not supported on CPU\n");
		return;
	}
}

/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
	ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
	cp->cpu_m.mcpu_ucode_info = NULL;
}

const char *
ucode_path(void)
{
	ASSERT(ucodepath != NULL);
	return (ucodepath);
}

/*
 * Allocate/free a buffer used to hold ucode data. Space allocated before kmem
 * is available is allocated with BOP_ALLOC() and does not require a free.
 */
void *
ucode_zalloc(size_t size)
{
	if (ucode_use_kmem)
		return (kmem_zalloc(size, KM_NOSLEEP));

	/* BOP_ALLOC() failure results in panic */
	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
}

void
ucode_free(void *buf, size_t size)
{
	if (ucode_use_kmem && buf != NULL)
		kmem_free(buf, size);
}

/*
 * Called to free up space allocated for the microcode file. This is called
 * from start_other_cpus() after an update attempt has been performed on all
 * CPUs.
 */
void
ucode_cleanup(void)
{
	mutex_enter(&ucode_lock);
	if (ucode != NULL)
		ucode->us_file_reset();
	ucode_cleanup_done = true;
	mutex_exit(&ucode_lock);

	/*
	 * We purposefully do not free 'ucodepath' here so that it persists for
	 * any future callers to ucode_locate(), such as could occur on systems
	 * that support DR.
	 */
}

static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
	on_trap_data_t otd;

	ASSERT(ucode != NULL);
	ASSERT(uusp->ucodep != NULL);

	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->us_read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		if (ucode->us_invalidate) {
			/*
			 * On some platforms a cache invalidation is required
			 * for the ucode update to be successful due to the
			 * parts of the processor that the microcode is
			 * updating.
			 */
			invalidate_cache();
		}
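		/*
		 * Trigger the update by writing the address of the microcode
		 * image to the implementation's update MSR.
		 */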
		wrmsr(ucode->us_write_msr, (uintptr_t)uusp->ucodep);
	}

	no_trap();
	ucode->us_read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}

/*
 * Entry points to microcode update from the 'ucode' driver.
 */

ucode_errno_t
ucode_validate(uint8_t *ucodep, size_t size)
{
	if (ucode == NULL)
		return (EM_NOTSUP);
	return (ucode->us_validate(ucodep, size));
}

ucode_errno_t
ucode_update(uint8_t *ucodep, size_t size)
{
	bool found = false;
	ucode_update_t cached = { 0 };
	ucode_update_t *cachedp = NULL;
	ucode_errno_t rc = EM_OK;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode != NULL);
	ASSERT(ucodep != NULL);
	CPUSET_ZERO(cpuset);

	if (!ucode->us_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (processorid_t id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY)) {
			continue;
		}

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is. But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->us_extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = true;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

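		/*
		 * Run ucode_write() on the target CPU via a synchronous cross
		 * call; preemption is disabled across the xcall.
		 */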
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
		    !ucode_force_update) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found) {
		rc = search_rc;
	} else if (rc == EM_OK) {
		cpuid_post_ucodeadm();
	}

	return (rc);
}

/*
 * Called when starting up non-boot CPUs from mp_startup() to read the current
 * microcode revision before the control CPU calls ucode_locate().
 */
void
ucode_read_rev(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;

	ASSERT3P(cp, !=, NULL);

	if (ucode == NULL || !ucode->us_capable(cp))
		return;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	ucode->us_read_rev(uinfop);
}

/*
 * Called by the control CPU when starting up non-boot CPUs to find any
 * applicable microcode updates. Initializes mcpu_ucode_info which, if an
 * update is found, will hold the relevant update to be applied via
 * ucode_apply(). ucode_read_rev() must have been called on the target CPU
 * before this function.
 */
void
ucode_locate(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc;
	size_t sz;

	ASSERT3P(cp, !=, NULL);
	ASSERT(ucode_use_kmem);

	mutex_enter(&ucode_lock);

	if (ucode == NULL || !ucode->us_capable(cp))
		goto out;

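	/*
	 * Build and cache the microcode path on first use (the boot-time path
	 * from ucode_check_boot() was discarded); see ucode_cleanup() for why
	 * it is kept around afterwards.
	 */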
	if (ucodepath == NULL) {
		sz = snprintf(NULL, 0, ucode_path_fmt, platform) + 1;
		ucodepath = kmem_zalloc(sz, KM_NOSLEEP);
		if (ucodepath == NULL) {
			cmn_err(CE_WARN,
			    "ucode: could not allocate memory for path");
			goto out;
		}
		(void) snprintf(ucodepath, sz, ucode_path_fmt, platform);
	}

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	/*
	 * Search for any applicable updates.
	 *
	 * A return value of EM_HIGHERREV indicates that no update was applied
	 * due to the CPU already being at that or a higher revision, but both
	 * EM_HIGHERREV and EM_OK indicate that some microcode that matches the
	 * CPU was successfully located. In either of these cases it's worth
	 * keeping it around in case it's useful for the next CPU -- and if it
	 * isn't it will end up being discarded. In all other cases we clear it
	 * out just in case we have read in a partial or invalid file.
	 *
	 * Architectural note:
	 * Depending on the platform, the cpu_t being processed may represent
	 * a thread within a CPU core. If updating one thread's microcode
	 * implicitly updates all sibling threads in the core, it's normal to
	 * see a mix of EM_OK and EM_HIGHERREV when iterating over those
	 * threads.
	 *
	 * There's one additional consideration. If we are here after
	 * ucode_cleanup() has been called, such as could occur with CPU
	 * hotplug, we also clear the memory and reset the data structure as
	 * nothing else will call ucode_cleanup() and we don't need to cache
	 * the data as we do during boot when starting the APs.
	 */
	rc = ucode->us_locate(cp, uinfop);
	if ((rc != EM_OK && rc != EM_HIGHERREV) || ucode_cleanup_done)
		ucode->us_file_reset();

out:
	mutex_exit(&ucode_lock);
}

/*
 * Called when starting up non-boot CPUs to load any pending microcode updates
 * found in ucode_locate(). Note this is called very early in the startup
 * process (before CPU_READY is set and while CPU_QUIESCED is) so we must be
 * careful about what we do here, e.g., no kmem_free or anything that might
 * call hat_unload; no kmem_alloc or anything which may cause thread context
 * switch. We also don't take the ucode_lock here for similar reasons (if
 * contended the idle thread will spin with CPU_QUIESCED set). This is fine
 * though since we should not be updating any shared ucode state.
 */
void
ucode_apply(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;

	ASSERT3P(cp, !=, NULL);

	if (ucode == NULL || !ucode->us_capable(cp))
		return;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	/*
	 * No pending update -- nothing to do.
	 */
	if (uinfop->cui_pending_ucode == NULL)
		return;

	/*
	 * Apply pending update.
	 */
	ucode->us_load(uinfop);
}

/*
 * Called when starting up non-boot CPUs to free any pending microcode updates
 * found in ucode_locate() and print the result of the attempt to load them in
 * ucode_apply(). This is separate from ucode_apply() as we can't yet call
 * kmem_free() at that point in the startup process.
 */
void
ucode_finish(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	uint32_t old_rev, new_rev;

	ASSERT3P(cp, !=, NULL);

	if (ucode == NULL || !ucode->us_capable(cp))
		return;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	/*
	 * No pending update -- nothing to do.
	 */
	if (uinfop->cui_pending_ucode == NULL)
		return;

	old_rev = uinfop->cui_rev;
	new_rev = uinfop->cui_pending_rev;
	ucode->us_read_rev(uinfop);

	if (uinfop->cui_rev != new_rev) {
		ASSERT3U(uinfop->cui_rev, ==, old_rev);
		cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id, old_rev,
		    new_rev);
	} else {
		cmn_err(CE_CONT, ucode_success_fmt, cp->cpu_id, old_rev,
		    new_rev);
	}

	ucode_free(uinfop->cui_pending_ucode, uinfop->cui_pending_size);
	uinfop->cui_pending_ucode = NULL;
	uinfop->cui_pending_size = 0;
	uinfop->cui_pending_rev = 0;
}

/*
 * Entry point to microcode update from mlsetup() for boot CPU.
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * cpuid_info must be initialized before we can be called.
 */
void
ucode_check_boot(void)
{
	cpu_t *cp = CPU;
	cpu_ucode_info_t *uinfop;
	const char *prop;
	char *plat;
	int prop_len;
	size_t path_len;

	ASSERT3U(cp->cpu_id, ==, 0);
	ASSERT(!ucode_use_kmem);

	mutex_enter(&ucode_lock);

	/* Space statically allocated for BSP; ensure pointer is set */
	ASSERT3P(cp->cpu_m.mcpu_ucode_info, ==, NULL);
	uinfop = cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	if (ucode == NULL || !ucode->us_capable(cp))
		goto out;

	ASSERT3P(ucodepath, ==, NULL);

	prop = "impl-arch-name";
	prop_len = BOP_GETPROPLEN(bootops, prop);
	if (prop_len <= 0) {
		cmn_err(CE_WARN, "ucode: could not find %s property", prop);
		goto out;
	}

	/*
	 * We're running on the boot CPU before kmem is available so we make
	 * use of BOP_ALLOC() -- which panics on failure -- to allocate any
	 * memory we need. That also means we don't need to explicitly free it.
	 */
	plat = BOP_ALLOC(bootops, NULL, prop_len + 1, MMU_PAGESIZE);
	(void) BOP_GETPROP(bootops, prop, plat);
	if (plat[0] == '\0') {
		/*
		 * If we can't determine the architecture name,
		 * we cannot find microcode files for it.
		 * Return without setting 'ucodepath'.
		 */
		cmn_err(CE_WARN, "ucode: could not determine arch");
		goto out;
	}

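	/*
	 * Build the microcode directory path from the template, i.e.
	 * /platform/<impl-arch-name>/ucode.
	 */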
	path_len = snprintf(NULL, 0, ucode_path_fmt, plat) + 1;
	ucodepath = BOP_ALLOC(bootops, NULL, path_len, MMU_PAGESIZE);
	(void) snprintf(ucodepath, path_len, ucode_path_fmt, plat);

	/*
	 * Check to see if we need ucode update
	 */
	ucode->us_read_rev(uinfop);
	if (ucode->us_locate(cp, uinfop) == EM_OK) {
		uint32_t old_rev, new_rev;

		old_rev = uinfop->cui_rev;
		new_rev = uinfop->cui_pending_rev;
		ucode->us_load(uinfop);
		ucode->us_read_rev(uinfop);

		if (uinfop->cui_rev != new_rev) {
			ASSERT3U(uinfop->cui_rev, ==, old_rev);
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    old_rev, new_rev);
		} else {
			cmn_err(CE_CONT, ucode_success_fmt, cp->cpu_id,
			    old_rev, new_rev);
		}
	}

	/*
	 * Regardless of whether we found a match or not, since the scratch
	 * memory for holding the microcode for the boot CPU came from
	 * BOP_ALLOC, we will reset the data structure as if we never did the
	 * allocation so we don't have to keep track of this special chunk of
	 * memory.
	 */
	ucode->us_file_reset();

	/*
	 * Similarly clear any pending update that may have been found.
	 */
	uinfop->cui_pending_ucode = NULL;
	uinfop->cui_pending_size = 0;
	uinfop->cui_pending_rev = 0;

out:
	/*
	 * Discard the memory that came from BOP_ALLOC and was used to build
	 * the ucode path. Subsequent CPUs will be handled via ucode_locate()
	 * at which point kmem is available and we can cache the path.
	 */
	ucodepath = NULL;
	ucode_use_kmem = true;

	mutex_exit(&ucode_lock);
}

/*
 * Returns the microcode revision of each CPU from its machcpu structure.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(revp != NULL);

	if (ucode == NULL || !ucode->us_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
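	/*
	 * revp is indexed by CPU id; callers are expected to supply an array
	 * with room for max_ncpus entries.
	 */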
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}