/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2024 Oxide Computer Company
 */

#include <sys/bootconf.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/utsname.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/ontrap.h>
#include <sys/systeminfo.h>
#include <sys/systm.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>

/*
 * mcpu_ucode_info for the boot CPU. Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;
static const ucode_source_t *ucode;
static char *ucodepath;
static kmutex_t ucode_lock;
static bool ucode_cleanup_done = false;

/*
 * Flag for use by microcode impls to determine if they can use kmem. Note this
 * is meant primarily for gating use of functions like kobj_open_file() which
 * allocate internally with kmem. ucode_zalloc() and ucode_free() should
 * otherwise be used.
 */
bool ucode_use_kmem = false;

static const char ucode_failure_fmt[] =
    "cpu%d: failed to update microcode from version 0x%x to 0x%x";
static const char ucode_success_fmt[] =
    "?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

static const char ucode_path_fmt[] = "/platform/%s/ucode";

SET_DECLARE(ucode_source_set, ucode_source_t);

/*
 * Force flag. If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version. Should only be used for debugging.
 */
int ucode_force_update = 0;

void
ucode_init(void)
{
	ucode_source_t **src;

	mutex_init(&ucode_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Set up function pointers */
	SET_FOREACH(src, ucode_source_set) {
		if ((*src)->us_select(CPU)) {
			ucode = *src;
			break;
		}
	}

	if (ucode == NULL)
		return;

#ifdef DEBUG
	cmn_err(CE_CONT, "?ucode: selected %s\n", ucode->us_name);

	if (!ucode->us_capable(CPU)) {
		cmn_err(CE_CONT,
		    "?ucode: microcode update not supported on CPU\n");
		return;
	}
#endif
}

/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
	ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
	cp->cpu_m.mcpu_ucode_info = NULL;
}

const char *
ucode_path(void)
{
	ASSERT(ucodepath != NULL);
	return (ucodepath);
}

/*
 * Allocate/free a buffer used to hold ucode data. Space allocated before kmem
 * is available is allocated with BOP_ALLOC() and does not require a free.
 */
void *
ucode_zalloc(size_t size)
{
	if (ucode_use_kmem)
		return (kmem_zalloc(size, KM_NOSLEEP));

	/* BOP_ALLOC() failure results in panic */
	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
}

void
ucode_free(void *buf, size_t size)
{
	if (ucode_use_kmem && buf != NULL)
		kmem_free(buf, size);
}

/*
 * Called to free up space allocated for the microcode file. This is called
 * from start_other_cpus() after an update attempt has been performed on all
 * CPUs.
 */
void
ucode_cleanup(void)
{
	mutex_enter(&ucode_lock);
	if (ucode != NULL)
		ucode->us_file_reset();
	ucode_cleanup_done = true;
	mutex_exit(&ucode_lock);

	/*
	 * We purposefully do not free 'ucodepath' here so that it persists for
	 * any future callers to ucode_locate(), such as could occur on systems
	 * that support DR.
	 */
}

static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
	on_trap_data_t otd;

	ASSERT(ucode != NULL);
	ASSERT(uusp->ucodep != NULL);

	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->us_read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		if (ucode->us_invalidate) {
			/*
			 * On some platforms a cache invalidation is required
			 * for the ucode update to be successful due to the
			 * parts of the processor that the microcode is
			 * updating.
			 */
			invalidate_cache();
		}
		wrmsr(ucode->us_write_msr, (uintptr_t)uusp->ucodep);
	}

	no_trap();
	ucode->us_read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}

/*
 * Entry points to microcode update from the 'ucode' driver.
 */

ucode_errno_t
ucode_validate(uint8_t *ucodep, int size)
{
	if (ucode == NULL)
		return (EM_NOTSUP);
	return (ucode->us_validate(ucodep, size));
}

ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int found = 0;
	ucode_update_t cached = { 0 };
	ucode_update_t *cachedp = NULL;
	ucode_errno_t rc = EM_OK;
	ucode_errno_t search_rc = EM_NOMATCH;	/* search result */
	cpuset_t cpuset;

	ASSERT(ucode != NULL);
	ASSERT(ucodep != NULL);
	CPUSET_ZERO(cpuset);

	if (!ucode->us_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (processorid_t id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY)) {
			continue;
		}

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp != NULL && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is. But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->us_extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
		    !ucode_force_update) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found) {
		rc = search_rc;
	} else if (rc == EM_OK) {
		cpuid_post_ucodeadm();
	}

	return (rc);
}

/*
 * Called when starting up non-boot CPUs from mp_startup() to read the current
 * microcode revision before the control CPU calls ucode_locate().
 */
void
ucode_read_rev(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;

	ASSERT3P(cp, !=, NULL);

	if (ucode == NULL || !ucode->us_capable(cp))
		return;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	ucode->us_read_rev(uinfop);
}

/*
 * Called by the control CPU when starting up non-boot CPUs to find any
 * applicable microcode updates. Initializes mcpu_ucode_info, which will
 * contain the relevant update to be applied via ucode_apply(), if one is
 * found.
 * ucode_read_rev() must be called before this function on the target CPU.
 */
void
ucode_locate(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	size_t sz;

	ASSERT3P(cp, !=, NULL);
	ASSERT(ucode_use_kmem);

	mutex_enter(&ucode_lock);

	if (ucode == NULL || !ucode->us_capable(cp))
		goto out;

	if (ucodepath == NULL) {
		sz = snprintf(NULL, 0, ucode_path_fmt, platform) + 1;
		ucodepath = kmem_zalloc(sz, KM_NOSLEEP);
		if (ucodepath == NULL) {
			cmn_err(CE_WARN,
			    "ucode: could not allocate memory for path");
			goto out;
		}
		(void) snprintf(ucodepath, sz, ucode_path_fmt, platform);
	}

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	/*
	 * Search for any applicable updates. If we fail to find a match for
	 * any reason, free the file structure just in case we have read in a
	 * partial file.
	 *
	 * In case we end up here after ucode_cleanup() has been called, such
	 * as could occur with CPU hotplug, we also clear the memory and reset
	 * the data structure as nothing else will call ucode_cleanup() and we
	 * don't need to cache the data as we do during boot when starting the
	 * APs.
	 */
	if ((ucode->us_locate(cp, uinfop) != EM_OK) || ucode_cleanup_done) {
		ucode->us_file_reset();
	}

out:
	mutex_exit(&ucode_lock);
}

/*
 * Called when starting up non-boot CPUs to load any pending microcode update
 * found in ucode_locate(). Note that this is called very early in the startup
 * process (before CPU_READY is set and while CPU_QUIESCED is), so we must be
 * careful about what we do here: no kmem_free() or anything else that might
 * call hat_unload(), and no kmem_alloc() or anything else that may cause a
 * thread context switch. We also don't take ucode_lock here for similar
 * reasons (if it were contended, the idle thread would spin with CPU_QUIESCED
 * set). This is fine, though, since we should not be updating any shared
 * ucode state.
 */
void
ucode_apply(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;

	ASSERT3P(cp, !=, NULL);

	if (ucode == NULL || !ucode->us_capable(cp))
		return;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	/*
	 * No pending update -- nothing to do.
	 */
	if (uinfop->cui_pending_ucode == NULL)
		return;

	/*
	 * Apply the pending update.
	 */
	ucode->us_load(uinfop);
}

/*
 * Called when starting up non-boot CPUs to free any pending microcode update
 * found in ucode_locate() and report the result of attempting to load it in
 * ucode_apply(). This is separate from ucode_apply() as we cannot yet call
 * kmem_free() at that point in the startup process.
 */
void
ucode_finish(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	uint32_t old_rev, new_rev;

	ASSERT3P(cp, !=, NULL);

	if (ucode == NULL || !ucode->us_capable(cp))
		return;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT3P(uinfop, !=, NULL);

	/*
	 * No pending update -- nothing to do.
	 */
	if (uinfop->cui_pending_ucode == NULL)
		return;

	old_rev = uinfop->cui_rev;
	new_rev = uinfop->cui_pending_rev;
	ucode->us_read_rev(uinfop);

	if (uinfop->cui_rev != new_rev) {
		ASSERT3U(uinfop->cui_rev, ==, old_rev);
		cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id, old_rev,
		    new_rev);
	} else {
		cmn_err(CE_CONT, ucode_success_fmt, cp->cpu_id, old_rev,
		    new_rev);
	}

	ucode_free(uinfop->cui_pending_ucode, uinfop->cui_pending_size);
	uinfop->cui_pending_ucode = NULL;
	uinfop->cui_pending_size = 0;
	uinfop->cui_pending_rev = 0;
}

/*
 * Entry point to microcode update from mlsetup() for the boot CPU.
 * Initializes mcpu_ucode_info and performs a microcode update if necessary.
 * cpuid_info must be initialized before we can be called.
 */
void
ucode_check_boot(void)
{
	cpu_t *cp = CPU;
	cpu_ucode_info_t *uinfop;
	const char *prop;
	char *plat;
	int prop_len;
	size_t path_len;

	ASSERT3U(cp->cpu_id, ==, 0);
	ASSERT(!ucode_use_kmem);

	mutex_enter(&ucode_lock);

	/* Space statically allocated for BSP; ensure pointer is set */
	ASSERT3P(cp->cpu_m.mcpu_ucode_info, ==, NULL);
	uinfop = cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	if (ucode == NULL || !ucode->us_capable(cp))
		goto out;

	ASSERT3P(ucodepath, ==, NULL);

	prop = "impl-arch-name";
	prop_len = BOP_GETPROPLEN(bootops, prop);
	if (prop_len <= 0) {
		cmn_err(CE_WARN, "ucode: could not find %s property", prop);
		goto out;
	}

	/*
	 * We're running on the boot CPU before kmem is available, so we make
	 * use of BOP_ALLOC() -- which panics on failure -- to allocate any
	 * memory we need. That also means we don't need to explicitly free
	 * it.
	 */
	plat = BOP_ALLOC(bootops, NULL, prop_len + 1, MMU_PAGESIZE);
	(void) BOP_GETPROP(bootops, prop, plat);
	if (plat[0] == '\0') {
		/*
		 * If we can't determine the architecture name, we cannot
		 * find microcode files for it. Return without setting
		 * 'ucodepath'.
		 */
		cmn_err(CE_WARN, "ucode: could not determine arch");
		goto out;
	}

	path_len = snprintf(NULL, 0, ucode_path_fmt, plat) + 1;
	ucodepath = BOP_ALLOC(bootops, NULL, path_len, MMU_PAGESIZE);
	(void) snprintf(ucodepath, path_len, ucode_path_fmt, plat);

	/*
	 * Check to see if we need a ucode update.
	 */
	ucode->us_read_rev(uinfop);
	if (ucode->us_locate(cp, uinfop) == EM_OK) {
		uint32_t old_rev, new_rev;

		old_rev = uinfop->cui_rev;
		new_rev = uinfop->cui_pending_rev;
		ucode->us_load(uinfop);
		ucode->us_read_rev(uinfop);

		if (uinfop->cui_rev != new_rev) {
			ASSERT3U(uinfop->cui_rev, ==, old_rev);
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    old_rev, new_rev);
		} else {
			cmn_err(CE_CONT, ucode_success_fmt, cp->cpu_id,
			    old_rev, new_rev);
		}
	}

	/*
	 * Regardless of whether we found a match or not, since the scratch
	 * memory for holding the microcode for the boot CPU came from
	 * BOP_ALLOC(), we reset the data structure as if we never did the
	 * allocation so we don't have to keep track of this special chunk of
	 * memory.
	 */
	ucode->us_file_reset();

	/*
	 * Similarly, clear any pending update that may have been found.
	 */
	uinfop->cui_pending_ucode = NULL;
	uinfop->cui_pending_size = 0;
	uinfop->cui_pending_rev = 0;

out:
	/*
	 * Discard the memory that came from BOP_ALLOC() and was used to
	 * build the ucode path. Subsequent CPUs will be handled via
	 * ucode_locate(), at which point kmem is available and we can cache
	 * the path.
	 */
	ucodepath = NULL;
	ucode_use_kmem = true;

	mutex_exit(&ucode_lock);
}

/*
 * Returns the microcode revision of each CPU from the machcpu structure.
 * The caller must supply an array with room for max_ncpus entries.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(revp != NULL);

	if (ucode == NULL || !ucode->us_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}