/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/asm_linkage.h>
#include <sys/bootconf.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/param.h>
#include <sys/machparam.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif

/*
 * AMD-specific equivalence table
 */
static ucode_eqtbl_amd_t *ucode_eqtbl_amd;

/*
 * mcpu_ucode_info for the boot CPU.  Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;

static ucode_file_t ucodefile;

static void* ucode_zalloc(processorid_t, size_t);
static void ucode_free(processorid_t, void *, size_t);

static int ucode_capable_amd(cpu_t *);
static int ucode_capable_intel(cpu_t *);

static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
    int);

static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
static void ucode_file_reset_intel(ucode_file_t *, processorid_t);

static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);

#ifdef __xpv
static void ucode_load_xpv(ucode_update_t *);
static void ucode_chipset_amd(uint8_t *, int);
#endif

static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);

static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);
static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);

#ifndef __xpv
static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
    ucode_file_amd_t *, int);
#endif
static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
    ucode_header_intel_t *, ucode_ext_table_intel_t *);

static void ucode_read_rev_amd(cpu_ucode_info_t *);
static void ucode_read_rev_intel(cpu_ucode_info_t *);

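/*
 * Per-vendor operations, dispatched through the global `ucode' pointer
 * once ucode_check() has identified the CPU vendor. The first member is
 * the MSR written to trigger a microcode update; the remaining members
 * are the capability check, file reset, revision read, load, validate,
 * extract and locate entry points used throughout this file.
 */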
static const struct ucode_ops ucode_amd = {
	MSR_AMD_PATCHLOADER,
	ucode_capable_amd,
	ucode_file_reset_amd,
	ucode_read_rev_amd,
	ucode_load_amd,
	ucode_validate_amd,
	ucode_extract_amd,
	ucode_locate_amd
};

static const struct ucode_ops ucode_intel = {
	MSR_INTC_UCODE_WRITE,
	ucode_capable_intel,
	ucode_file_reset_intel,
	ucode_read_rev_intel,
	ucode_load_intel,
	ucode_validate_intel,
	ucode_extract_intel,
	ucode_locate_intel
};

const struct ucode_ops *ucode;

static const char ucode_failure_fmt[] =
	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
static const char ucode_success_fmt[] =
	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

/*
 * Force flag.  If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version.  Should only be used for debugging.
 */
int ucode_force_update = 0;

/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
	ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
	cp->cpu_m.mcpu_ucode_info = NULL;
}

/*
 * Called when we are done with microcode update on all processors to free up
 * space allocated for the microcode file.
 */
void
ucode_cleanup()
{
	if (ucode == NULL)
		return;

	ucode->file_reset(&ucodefile, -1);
}

/*
 * Allocate/free a buffer used to hold ucode data.  Space for the boot CPU is
 * allocated with BOP_ALLOC() and does not require a free.
 */
static void*
ucode_zalloc(processorid_t id, size_t size)
{
	if (id)
		return (kmem_zalloc(size, KM_NOSLEEP));

	/* BOP_ALLOC() failure results in panic */
	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
}

static void
ucode_free(processorid_t id, void* buf, size_t size)
{
	if (id)
		kmem_free(buf, size);
}

/*
 * Check whether or not a processor is capable of microcode operations.
 * Returns 1 if it is capable, 0 if not.
 *
 * At this point we only support microcode update for:
 * - Intel processors family 6 and above, and
 * - AMD processors family 0x10 and above.
 *
 * We also assume that we don't support a mix of Intel and
 * AMD processors in the same box.
 *
 * An i86xpv guest domain can't update the microcode.
 */
/*ARGSUSED*/
static int
ucode_capable_amd(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (hwenv == HW_XEN_HVM || (hwenv == HW_XEN_PV && !is_controldom())) {
		return (0);
	}
	return (cpuid_getfamily(cp) >= 0x10);
}

static int
ucode_capable_intel(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (hwenv == HW_XEN_HVM || (hwenv == HW_XEN_PV && !is_controldom())) {
		return (0);
	}
	return (cpuid_getfamily(cp) >= 6);
}

/*
 * Called when it is no longer necessary to keep the microcode around,
 * or when the cached microcode doesn't match the CPU being processed.
 */
static void
ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_amd_t *ucodefp = ufp->amd;

	if (ucodefp == NULL)
		return;

	ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
	ufp->amd = NULL;
}

static void
ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
	int total_size, body_size;

	if (ucodefp == NULL || ucodefp->uf_header == NULL)
		return;

	total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	if (ucodefp->uf_body) {
		ucode_free(id, ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;

		ucode_free(id, ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	ucodefp->uf_header = NULL;
}

/*
 * Find the equivalent CPU id in the equivalence table.
 */
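/*
 * As read here, the table is an array of ucode_eqtbl_amd_t records, each
 * mapping an installed CPU signature (ue_inst_cpu) to the equivalent CPU
 * id (ue_equiv_cpu) that names the patch files; a record with
 * ue_inst_cpu == 0 terminates the table.
 */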
static int
ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int offset = 0, cpi_sig = cpuid_getsig(cp);
	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	/*
	 * No kmem_zalloc() etc. available on boot cpu.
	 */
	if (cp->cpu_id == 0) {
		if ((fd = kobj_open(name)) == -1)
			return (EM_OPENFILE);
		/* ucode_zalloc() cannot fail on boot cpu */
		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
		ASSERT(eqtbl);
		do {
			count = kobj_read(fd, (int8_t *)eqtbl,
			    sizeof (*eqtbl), offset);
			if (count != sizeof (*eqtbl)) {
				(void) kobj_close(fd);
				return (EM_HIGHERREV);
			}
			offset += count;
		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
		(void) kobj_close(fd);
	}

	/*
	 * If not already done, load the equivalence table.
	 * Not done on boot CPU.
	 */
	if (eqtbl == NULL) {
		struct _buf *eq;
		uint64_t size;

		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
			return (EM_OPENFILE);

		if (kobj_get_filesize(eq, &size) < 0) {
			kobj_close_file(eq);
			return (EM_OPENFILE);
		}

		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
		if (ucode_eqtbl_amd == NULL) {
			kobj_close_file(eq);
			return (EM_NOMEM);
		}

		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
		kobj_close_file(eq);

		if (count != size)
			return (EM_FILESIZE);
	}

	/* Get the equivalent CPU id. */
	if (cp->cpu_id)
		for (eqtbl = ucode_eqtbl_amd;
		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
		    eqtbl++)
			;

	*eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (*eq_sig == 0)
		return (EM_HIGHERREV);

	return (EM_OK);
}

/*
 * xVM cannot check for the presence of PCI devices.  Look for chipset-
 * specific microcode patches in the container file and disable them
 * by setting their CPU revision to an invalid value.
 */
#ifdef __xpv
static void
ucode_chipset_amd(uint8_t *buf, int size)
{
	ucode_header_amd_t *uh;
	uint32_t *ptr = (uint32_t *)buf;
	int len = 0;

	/* skip to first microcode patch */
	ptr += 2; len = *ptr++; ptr += len >> 2; size -= len;

	while (size >= sizeof (ucode_header_amd_t) + 8) {
		ptr++; len = *ptr++;
		uh = (ucode_header_amd_t *)ptr;
		ptr += len >> 2; size -= len;

		if (uh->uh_nb_id) {
			cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_nb_id, uh->uh_nb_rev);
			uh->uh_cpu_rev = 0xffff;
		}

		if (uh->uh_sb_id) {
			cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_sb_id, uh->uh_sb_rev);
			uh->uh_cpu_rev = 0xffff;
		}
	}
}
#endif

/*
 * Populate the ucode file structure from the microcode file corresponding to
 * this CPU, if it exists.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
/*ARGSUSED*/
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count, rc;
	ucode_file_amd_t *ucodefp = ufp->amd;

#ifndef __xpv
	uint16_t eq_sig = 0;
	int i;

	/* get equivalent CPU id */
	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
		return (rc);

	/*
	 * Allocate a buffer for the microcode patch.  If the buffer has been
	 * allocated before, check for a matching microcode to avoid loading
	 * the file again.
	 */
	if (ucodefp == NULL)
		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
	    == EM_OK)
		return (EM_OK);

	if (ucodefp == NULL)
		return (EM_NOMEM);

	ufp->amd = ucodefp;

	/*
	 * Find the patch for this CPU.  The patch files are named XXXX-YY,
	 * where XXXX is the equivalent CPU id and YY is the running patch
	 * number.  Patches specific to certain chipsets are guaranteed to
	 * have lower numbers than less specific patches, so we can just load
	 * the first patch that matches.
	 */
	for (i = 0; i < 0xff; i++) {
		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
		if ((fd = kobj_open(name)) == -1)
			return (EM_NOMATCH);
		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
		(void) kobj_close(fd);

		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
			return (EM_OK);
	}
	return (EM_NOMATCH);
#else
	int size = 0;
	char c;

	/*
	 * The xVM case is special.  To support mixed-revision systems, the
	 * hypervisor will choose which patch to load for which CPU, so the
	 * whole microcode patch container file will have to be loaded.
	 *
	 * Since this code is only run on the boot cpu, we don't have to care
	 * about failing ucode_zalloc() or freeing allocated memory.
	 */
	if (cp->cpu_id != 0)
		return (EM_INVALIDARG);

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/container",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	if ((fd = kobj_open(name)) == -1)
		return (EM_OPENFILE);

	/* get the file size by counting bytes */
	do {
		count = kobj_read(fd, &c, 1, size);
		size += count;
	} while (count);

	ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	ASSERT(ucodefp);
	ufp->amd = ucodefp;

	ucodefp->usize = size;
	ucodefp->ucodep = ucode_zalloc(cp->cpu_id, size);
	ASSERT(ucodefp->ucodep);

	/* load the microcode patch container file */
	count = kobj_read(fd, (char *)ucodefp->ucodep, size, 0);
	(void) kobj_close(fd);

	if (count != size)
		return (EM_FILESIZE);

	/* make sure the container file is valid */
	rc = ucode->validate(ucodefp->ucodep, ucodefp->usize);

	if (rc != EM_OK)
		return (rc);

	/* disable chipset-specific patches */
	ucode_chipset_amd(ucodefp->ucodep, ucodefp->usize);

	return (EM_OK);
#endif
}

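/*
 * Intel microcode is installed as one file per signature/platform-id
 * pair, named %08X-%02X (CPU signature, then platform id). Read in the
 * header, body and any extended signature table, verifying checksums as
 * we go, and finally match the file against the running CPU.
 */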
static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int header_size = UCODE_HEADER_SIZE_INTEL;
	int cpi_sig = cpuid_getsig(cp);
	ucode_errno_t rc = EM_OK;
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode->file_reset(ufp, cp->cpu_id);

	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t *uhp = ucodefp->uf_header;
		uint32_t offset = header_size;
		int total_size, body_size, ext_size;
		uint32_t sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is an extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum_intel(0,
				    UCODE_EXT_SIG_SIZE_INTEL,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}

#ifndef __xpv
static ucode_errno_t
ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
    ucode_file_amd_t *ucodefp, int size)
{
	ucode_header_amd_t *uh;

	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
		return (EM_NOMATCH);

	uh = &ucodefp->uf_header;

	/*
	 * Don't even think about loading patches that would require code
	 * execution.  Does not apply to patches for family 0x14 and beyond.
	 */
	if (uh->uh_cpu_rev < 0x5000 &&
	    size > offsetof(ucode_file_amd_t, uf_code_present) &&
	    ucodefp->uf_code_present)
		return (EM_NOMATCH);

	if (eq_sig != uh->uh_cpu_rev)
		return (EM_NOMATCH);

	if (uh->uh_nb_id) {
		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_sb_id) {
		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_patch_id <= uinfop->cui_rev && !ucode_force_update)
		return (EM_HIGHERREV);

	return (EM_OK);
}
#endif

/*
 * Returns EM_OK if the microcode matches this processor, EM_HIGHERREV if the
 * processor is already running an equal or newer revision (unless
 * ucode_force_update is set), and EM_NOMATCH otherwise.
 */
static ucode_errno_t
ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
    ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
{
	if (uhp == NULL)
		return (EM_NOMATCH);

	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	if (uetp != NULL) {
		int i;

		for (i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_intel_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}

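/*
 * xc_sync() handler for ucode_update(): runs on each target CPU, writes
 * the kernel virtual address of the staged microcode to the vendor's
 * update MSR, and rereads the revision so the caller can verify the
 * result.  The on_trap() protection keeps a WRMSR rejected by the
 * hardware from taking the system down.
 */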
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
#ifndef __xpv
	on_trap_data_t otd;
#endif

	ASSERT(ucode);
	ASSERT(uusp->ucodep);

#ifndef __xpv
	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	if (!on_trap(&otd, OT_DATA_ACCESS))
		wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);

	no_trap();
#endif
	ucode->read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}

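/*
 * Apply the cached microcode patch to the running CPU.  On bare metal
 * this is a WRMSR of the patch address; under xVM the patch is handed
 * to the hypervisor via ucode_load_xpv() instead.  Returns the revision
 * the CPU is expected to be running afterwards.
 */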
/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_amd_t *ucodefp = ufp->amd;
#ifdef __xpv
	ucode_update_t uus;
#else
	on_trap_data_t otd;
#endif

	ASSERT(ucode);
	ASSERT(ucodefp);

#ifndef __xpv
	kpreempt_disable();
	if (on_trap(&otd, OT_DATA_ACCESS)) {
		no_trap();
		kpreempt_enable();
		return (0);
	}
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
	no_trap();
	ucode->read_rev(uinfop);
	kpreempt_enable();

	return (ucodefp->uf_header.uh_patch_id);
#else
	uus.ucodep = ucodefp->ucodep;
	uus.usize = ucodefp->usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;

	return (uus.new_rev);
#endif
}

/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
#ifdef __xpv
	uint32_t ext_offset;
	uint32_t body_size;
	uint32_t ext_size;
	uint8_t *ustart;
	uint32_t usize;
	ucode_update_t uus;
#endif

	ASSERT(ucode);

#ifdef __xpv
	/*
	 * The hypervisor wants the header, data, and extended signature
	 * tables as one contiguous buffer.  We can only get here from the
	 * boot CPU (cpu #0), so we don't need to free: ucode_zalloc() will
	 * use BOP_ALLOC().
	 */
	usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	ustart = ucode_zalloc(cp->cpu_id, usize);
	ASSERT(ustart);

	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
	ext_size = usize - ext_offset;
	ASSERT(ext_size >= 0);

	(void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	(void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
	    body_size);
	if (ext_size > 0) {
		(void) memcpy(&ustart[ext_offset],
		    ucodefp->uf_ext_table, ext_size);
	}
	uus.ucodep = ustart;
	uus.usize = usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;
#else
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
	ucode->read_rev(uinfop);
	kpreempt_enable();
#endif

	return (ucodefp->uf_header->uh_rev);
}


#ifdef __xpv
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;

	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
	op.u.microcode.length = uusp->usize;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	kpreempt_enable();
}
#endif /* __xpv */

static void
ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
{
	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
}

static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, and
	 * that cpuid then be executed, to guarantee a correct reading of
	 * this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}

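/*
 * As parsed here, the AMD container file begins with a magic number,
 * followed by the equivalence table and then a sequence of patches,
 * each section introduced by a type word and a size word.  Walk the
 * container, translate the CPU signature through the equivalence table,
 * and use the first patch that matches.
 */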
static ucode_errno_t
ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
#ifndef __xpv
	uint32_t *ptr = (uint32_t *)ucodep;
	ucode_eqtbl_amd_t *eqtbl;
	ucode_file_amd_t *ufp;
	int count;
	int higher = 0;
	ucode_errno_t rc = EM_NOMATCH;
	uint16_t eq_sig;

	/* skip over magic number & equivalence table header */
	ptr += 2; size -= 8;

	count = *ptr++; size -= 4;
	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
	    eqtbl++)
		;

	eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (eq_sig == 0)
		return (EM_HIGHERREV);

	/* Use the first microcode patch that matches. */
	do {
		ptr += count >> 2; size -= count;

		if (!size)
			return (higher ? EM_HIGHERREV : EM_NOMATCH);

		ptr++; size -= 4;
		count = *ptr++; size -= 4;
		ufp = (ucode_file_amd_t *)ptr;

		rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
		if (rc == EM_HIGHERREV)
			higher = 1;
	} while (rc != EM_OK);

	uusp->ucodep = (uint8_t *)ufp;
	uusp->usize = count;
	uusp->expected_rev = ufp->uf_header.uh_patch_id;
#else
	/*
	 * The hypervisor will choose the patch to load, so there is no way to
	 * know the "expected revision" in advance.  This is especially true
	 * on mixed-revision systems where more than one patch will be loaded.
	 */
	uusp->expected_rev = 0;
	uusp->ucodep = ucodep;
	uusp->usize = size;

	ucode_chipset_amd(ucodep, size);
#endif

	return (EM_OK);
}

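/*
 * A microcode file handed in by the driver may be the concatenation of
 * many patches; scan all of them and remember the highest-revision match
 * for this CPU.
 */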
static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t header_size = UCODE_HEADER_SIZE_INTEL;
	int remaining;
	int found = 0;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int total_size, body_size, ext_size;
		uint8_t *curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we are searching through a big file
		 * containing microcode for pretty much all the
		 * processors, we are bound to get EM_NOMATCH
		 * at one point.  However, if we return
		 * EM_NOMATCH to users, it will really confuse
		 * them.  Therefore, if we ever find a match of
		 * a lower rev, we will set return code to
		 * EM_HIGHERREV.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
#ifndef __xpv
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
#else
			uusp->ucodep = (uint8_t *)curbuf;
#endif
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}

/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
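/*
 * The update is performed while holding cpu_lock, one CPU at a time:
 * each online, xcall-ready CPU gets its own xc_sync() with a single-CPU
 * set, so that ucode_write() executes on the CPU being updated.
 */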
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int found = 0;
	processorid_t id;
	ucode_update_t cached = { 0 };
	ucode_update_t *cachedp = NULL;
	ucode_errno_t rc = EM_OK;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode);
	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

#ifdef __xpv
		/*
		 * For i86xpv, the hypervisor will update all the CPUs.
		 * The hypervisor wants the header, data, and extended
		 * signature tables.  ucode_write will just read in the
		 * updated version on all the CPUs after the update has
		 * completed.
		 */
		if (id == 0) {
			ucode_load_xpv(uusp);
		}
#endif

		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
		    !ucode_force_update) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found)
		rc = search_rc;

	return (rc);
}

/*
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from boot path where pointer to CPU structure
 * is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	uint32_t new_rev = 0;

	ASSERT(cp);
	/*
	 * Space statically allocated for BSP, ensure pointer is set
	 */
	if (cp->cpu_id == 0 && cp->cpu_m.mcpu_ucode_info == NULL)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	/* set up function pointers if not already done */
	if (!ucode)
		switch (cpuid_getvendor(cp)) {
		case X86_VENDOR_AMD:
			ucode = &ucode_amd;
			break;
		case X86_VENDOR_Intel:
			ucode = &ucode_intel;
			break;
		default:
			ucode = NULL;
			return;
		}

	if (!ucode->capable(cp))
		return;

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode->read_rev(uinfop);

#ifdef __xpv
	/*
	 * For i86xpv, the hypervisor will update all the CPUs.  We only need
	 * to do this on one of the CPUs (and there always is a CPU 0).
	 */
	if (cp->cpu_id != 0) {
		return;
	}
#endif

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
		new_rev = ucode->load(&ucodefile, uinfop, cp);

		if (uinfop->cui_rev != new_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, new_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode->file_reset(&ucodefile, cp->cpu_id);
}

/*
 * Returns microcode revision from the machcpu structure.
 */
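/*
 * The caller is expected to supply an array with room for max_ncpus
 * entries; entries for CPU ids that are not present are left untouched.
 */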
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(ucode);
	ASSERT(revp);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}