/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/asm_linkage.h>
#include <sys/bootconf.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/param.h>
#include <sys/machparam.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>
#ifdef	__xpv
#include <sys/hypervisor.h>
#endif
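/*
 * Overview (a summary of the code below, not an authoritative spec):
 * this file performs microcode updates on Intel and AMD processors.
 * Vendor-specific behavior is hidden behind the ucode_ops vtable
 * defined below; the two entry points are ucode_check(), called on
 * the boot path for each CPU, and ucode_update(), called from the
 * ucode_drv driver with a caller-supplied microcode image.
 */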
/*
 * AMD-specific equivalence table
 */
static ucode_eqtbl_amd_t *ucode_eqtbl_amd;

/*
 * mcpu_ucode_info for the boot CPU.  Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;

static ucode_file_t ucodefile;

static void *ucode_zalloc(processorid_t, size_t);
static void ucode_free(processorid_t, void *, size_t);

static int ucode_capable_amd(cpu_t *);
static int ucode_capable_intel(cpu_t *);

static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
    int);

static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
static void ucode_file_reset_intel(ucode_file_t *, processorid_t);

static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);

#ifdef	__xpv
static void ucode_load_xpv(ucode_update_t *);
static void ucode_chipset_amd(uint8_t *, int);
#endif

static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);

static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);
static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);

#ifndef	__xpv
static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
    ucode_file_amd_t *, int);
#endif
static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
    ucode_header_intel_t *, ucode_ext_table_intel_t *);

static void ucode_read_rev_amd(cpu_ucode_info_t *);
static void ucode_read_rev_intel(cpu_ucode_info_t *);

static const struct ucode_ops ucode_amd = {
	MSR_AMD_PATCHLOADER,
	ucode_capable_amd,
	ucode_file_reset_amd,
	ucode_read_rev_amd,
	ucode_load_amd,
	ucode_validate_amd,
	ucode_extract_amd,
	ucode_locate_amd
};

static const struct ucode_ops ucode_intel = {
	MSR_INTC_UCODE_WRITE,
	ucode_capable_intel,
	ucode_file_reset_intel,
	ucode_read_rev_intel,
	ucode_load_intel,
	ucode_validate_intel,
	ucode_extract_intel,
	ucode_locate_intel
};

const struct ucode_ops *ucode;

static const char ucode_failure_fmt[] =
	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
static const char ucode_success_fmt[] =
	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

/*
 * Force flag.  If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version.  Should only be used for debugging.
 */
int ucode_force_update = 0;
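/*
 * Example (a hypothetical debugging session, not a supported
 * interface): the flag can be flipped on a live kernel with mdb(1),
 * e.g.
 *
 *	# echo 'ucode_force_update/W 1' | mdb -kw
 *
 * after which the next update attempt ignores the running revision.
 */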
/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
	ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
	cp->cpu_m.mcpu_ucode_info = NULL;
}

/*
 * Called when we are done with microcode update on all processors to free up
 * space allocated for the microcode file.
 */
void
ucode_cleanup()
{
	if (ucode == NULL)
		return;

	ucode->file_reset(&ucodefile, -1);
}

/*
 * Allocate/free a buffer used to hold ucode data.  Space for the boot CPU is
 * allocated with BOP_ALLOC() and does not require a free.
 */
static void *
ucode_zalloc(processorid_t id, size_t size)
{
	if (id)
		return (kmem_zalloc(size, KM_NOSLEEP));

	/* BOP_ALLOC() failure results in panic */
	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
}

static void
ucode_free(processorid_t id, void *buf, size_t size)
{
	if (id)
		kmem_free(buf, size);
}

/*
 * Check whether or not a processor is capable of microcode operations.
 * Returns 1 if it is capable, 0 if not.
 *
 * At this point we only support microcode update for:
 * - Intel processors family 6 and above, and
 * - AMD processors family 0x10 and above.
 *
 * We also assume that we don't support a mix of Intel and
 * AMD processors in the same box.
 *
 * An i86xpv guest domain or VM can't update the microcode.
 */

#define	XPVDOMU_OR_HVM	\
	((hwenv == HW_XEN_PV && !is_controldom()) || (hwenv & HW_VIRTUAL) != 0)

/*ARGSUSED*/
static int
ucode_capable_amd(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (XPVDOMU_OR_HVM)
		return (0);

	return (cpuid_getfamily(cp) >= 0x10);
}

static int
ucode_capable_intel(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (XPVDOMU_OR_HVM)
		return (0);

	return (cpuid_getfamily(cp) >= 6);
}

/*
 * Called when it is no longer necessary to keep the microcode around,
 * or when the cached microcode doesn't match the CPU being processed.
 */
static void
ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_amd_t *ucodefp = ufp->amd;

	if (ucodefp == NULL)
		return;

	ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
	ufp->amd = NULL;
}

static void
ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
	int total_size, body_size;

	if (ucodefp == NULL || ucodefp->uf_header == NULL)
		return;

	total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	if (ucodefp->uf_body) {
		ucode_free(id, ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;

		ucode_free(id, ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	ucodefp->uf_header = NULL;
}
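/*
 * The AMD equivalence table is a flat file under the microcode install
 * path; assuming UCODE_INSTALL_PATH resolves to "platform/i86pc/ucode",
 * the full path would be, e.g.:
 *
 *	/platform/i86pc/ucode/AuthenticAMD/equivalence-table
 *
 * It holds an array of fixed-size ucode_eqtbl_amd_t records terminated
 * by an entry whose ue_inst_cpu is zero.  The record whose ue_inst_cpu
 * matches the running CPU's signature supplies ue_equiv_cpu, the id
 * under which the patch files themselves are named.  On the boot CPU
 * the file is read one record at a time into BOP_ALLOC()ed scratch
 * space; on other CPUs it is read whole and cached in ucode_eqtbl_amd.
 */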
/*
 * Find the equivalent CPU id in the equivalence table.
 */
static int
ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int offset = 0, cpi_sig = cpuid_getsig(cp);
	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	/*
	 * No kmem_zalloc() etc. available on boot cpu.
	 */
	if (cp->cpu_id == 0) {
		if ((fd = kobj_open(name)) == -1)
			return (EM_OPENFILE);
		/* ucode_zalloc() cannot fail on boot cpu */
		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
		ASSERT(eqtbl);
		do {
			count = kobj_read(fd, (int8_t *)eqtbl,
			    sizeof (*eqtbl), offset);
			if (count != sizeof (*eqtbl)) {
				(void) kobj_close(fd);
				return (EM_HIGHERREV);
			}
			offset += count;
		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
		(void) kobj_close(fd);
	}

	/*
	 * If not already done, load the equivalence table.
	 * Not done on boot CPU.
	 */
	if (eqtbl == NULL) {
		struct _buf *eq;
		uint64_t size;

		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
			return (EM_OPENFILE);

		if (kobj_get_filesize(eq, &size) < 0) {
			kobj_close_file(eq);
			return (EM_OPENFILE);
		}

		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
		if (ucode_eqtbl_amd == NULL) {
			kobj_close_file(eq);
			return (EM_NOMEM);
		}

		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
		kobj_close_file(eq);

		if (count != size)
			return (EM_FILESIZE);
	}

	/* Get the equivalent CPU id. */
	if (cp->cpu_id)
		for (eqtbl = ucode_eqtbl_amd;
		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
		    eqtbl++)
			;

	*eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (*eq_sig == 0)
		return (EM_HIGHERREV);

	return (EM_OK);
}

/*
 * xVM cannot check for the presence of PCI devices.  Look for chipset-
 * specific microcode patches in the container file and disable them
 * by setting their CPU revision to an invalid value.
 */
#ifdef	__xpv
static void
ucode_chipset_amd(uint8_t *buf, int size)
{
	ucode_header_amd_t *uh;
	uint32_t *ptr = (uint32_t *)buf;
	int len = 0;

	/* skip to first microcode patch */
	ptr += 2; len = *ptr++; ptr += len >> 2; size -= len;

	while (size >= sizeof (ucode_header_amd_t) + 8) {
		ptr++; len = *ptr++;
		uh = (ucode_header_amd_t *)ptr;
		ptr += len >> 2; size -= len;

		if (uh->uh_nb_id) {
			cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_nb_id, uh->uh_nb_rev);
			uh->uh_cpu_rev = 0xffff;
		}

		if (uh->uh_sb_id) {
			cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_sb_id, uh->uh_sb_rev);
			uh->uh_cpu_rev = 0xffff;
		}
	}
}
#endif
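/*
 * A sketch of the AMD microcode container file layout, inferred from
 * the pointer arithmetic in ucode_chipset_amd() above and
 * ucode_extract_amd() below (not a format specification):
 *
 *	uint32_t magic;
 *	uint32_t eq_table_type;
 *	uint32_t eq_table_len;
 *	ucode_eqtbl_amd_t eq_table[];	(eq_table_len bytes)
 *	repeated {
 *		uint32_t patch_type;
 *		uint32_t patch_len;
 *		ucode_file_amd_t patch;	(patch_len bytes)
 *	}
 */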
/*
 * Populate the ucode file structure from the microcode file
 * corresponding to this CPU, if one exists.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
/*ARGSUSED*/
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count, rc;
	ucode_file_amd_t *ucodefp = ufp->amd;

#ifndef	__xpv
	uint16_t eq_sig = 0;
	int i;

	/* get equivalent CPU id */
	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
		return (rc);

	/*
	 * Allocate a buffer for the microcode patch.  If the buffer has been
	 * allocated before, check for a matching microcode to avoid loading
	 * the file again.
	 */
	if (ucodefp == NULL)
		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
	    == EM_OK)
		return (EM_OK);

	if (ucodefp == NULL)
		return (EM_NOMEM);

	ufp->amd = ucodefp;

	/*
	 * Find the patch for this CPU.  The patch files are named XXXX-YY,
	 * where XXXX is the equivalent CPU id and YY is the running patch
	 * number.  Patches specific to certain chipsets are guaranteed to
	 * have lower numbers than less specific patches, so we can just
	 * load the first patch that matches.
	 */

	for (i = 0; i < 0xff; i++) {
		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
		if ((fd = kobj_open(name)) == -1)
			return (EM_NOMATCH);
		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
		(void) kobj_close(fd);

		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
			return (EM_OK);
	}
	return (EM_NOMATCH);
#else
	int size = 0;
	char c;

	/*
	 * The xVM case is special.  To support mixed-revision systems, the
	 * hypervisor will choose which patch to load for which CPU, so the
	 * whole microcode patch container file will have to be loaded.
	 *
	 * Since this code is only run on the boot cpu, we don't have to care
	 * about failing ucode_zalloc() or freeing allocated memory.
	 */
	if (cp->cpu_id != 0)
		return (EM_INVALIDARG);

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/container",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	if ((fd = kobj_open(name)) == -1)
		return (EM_OPENFILE);

	/* get the file size by counting bytes */
	do {
		count = kobj_read(fd, &c, 1, size);
		size += count;
	} while (count);

	ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	ASSERT(ucodefp);
	ufp->amd = ucodefp;

	ucodefp->usize = size;
	ucodefp->ucodep = ucode_zalloc(cp->cpu_id, size);
	ASSERT(ucodefp->ucodep);

	/* load the microcode patch container file */
	count = kobj_read(fd, (char *)ucodefp->ucodep, size, 0);
	(void) kobj_close(fd);

	if (count != size)
		return (EM_FILESIZE);

	/* make sure the container file is valid */
	rc = ucode->validate(ucodefp->ucodep, ucodefp->usize);

	if (rc != EM_OK)
		return (rc);

	/* disable chipset-specific patches */
	ucode_chipset_amd(ucodefp->ucodep, ucodefp->usize);

	return (EM_OK);
#endif
}
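/*
 * Intel microcode is installed one patch per file, named "%08X-%02X"
 * after the CPU signature and platform id (see the snprintf() in
 * ucode_locate_intel() below).  For example, a CPU with signature
 * 0x306C3 and platform id mask 0x01 (a hypothetical combination) would
 * be matched by, assuming UCODE_INSTALL_PATH resolves to
 * "platform/i86pc/ucode":
 *
 *	/platform/i86pc/ucode/GenuineIntel/000306C3-01
 */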
static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int header_size = UCODE_HEADER_SIZE_INTEL;
	int cpi_sig = cpuid_getsig(cp);
	ucode_errno_t rc = EM_OK;
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode->file_reset(ufp, cp->cpu_id);

	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t *uhp = ucodefp->uf_header;
		uint32_t offset = header_size;
		int total_size, body_size, ext_size;
		uint32_t sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is an extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum_intel(0,
				    UCODE_EXT_SIG_SIZE_INTEL,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}
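/*
 * A note on the checksum handling above: ucode_checksum_intel() is
 * assumed to sum 32-bit words starting from a caller-supplied initial
 * value, with zero indicating a valid image.  ucode_locate_intel()
 * therefore folds the header sum into the body check (header plus body
 * must sum to zero) and validates the extended signature table, and
 * each extended signature entry, independently.
 */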
#ifndef	__xpv
static ucode_errno_t
ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
    ucode_file_amd_t *ucodefp, int size)
{
	ucode_header_amd_t *uh;

	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
		return (EM_NOMATCH);

	uh = &ucodefp->uf_header;

	/*
	 * Don't even think about loading patches that would require code
	 * execution.  Does not apply to patches for family 0x14 and beyond.
	 */
	if (uh->uh_cpu_rev < 0x5000 &&
	    size > offsetof(ucode_file_amd_t, uf_code_present) &&
	    ucodefp->uf_code_present)
		return (EM_NOMATCH);

	if (eq_sig != uh->uh_cpu_rev)
		return (EM_NOMATCH);

	if (uh->uh_nb_id) {
		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_sb_id) {
		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_patch_id <= uinfop->cui_rev && !ucode_force_update)
		return (EM_HIGHERREV);

	return (EM_OK);
}
#endif

/*
 * Returns EM_OK if the microcode is for this processor, EM_HIGHERREV if an
 * equal or higher revision is already running (and the force flag is not
 * set), and EM_NOMATCH otherwise.
 */
static ucode_errno_t
ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
    ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
{
	if (uhp == NULL)
		return (EM_NOMATCH);

	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	if (uetp != NULL) {
		int i;

		for (i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_intel_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}
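/*
 * ucode_write() below runs on the target CPU via xc_sync() (see
 * ucode_update()).  The WRMSR that triggers the load is wrapped in
 * on_trap(OT_DATA_ACCESS) so that a trap raised by a rejected update
 * is absorbed rather than bringing down the system; the revision MSR
 * is then re-read to observe what actually took effect.
 */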
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
#ifndef	__xpv
	on_trap_data_t otd;
#endif

	ASSERT(ucode);
	ASSERT(uusp->ucodep);

#ifndef	__xpv
	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	if (!on_trap(&otd, OT_DATA_ACCESS))
		wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);

	no_trap();
#endif
	ucode->read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}

/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_amd_t *ucodefp = ufp->amd;
#ifdef	__xpv
	ucode_update_t uus;
#else
	on_trap_data_t otd;
#endif

	ASSERT(ucode);
	ASSERT(ucodefp);

#ifndef	__xpv
	kpreempt_disable();
	if (on_trap(&otd, OT_DATA_ACCESS)) {
		no_trap();
		kpreempt_enable();
		return (0);
	}
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
	no_trap();
	ucode->read_rev(uinfop);
	kpreempt_enable();

	return (ucodefp->uf_header.uh_patch_id);
#else
	uus.ucodep = ucodefp->ucodep;
	uus.usize = ucodefp->usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;

	return (uus.new_rev);
#endif
}
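/*
 * Note the difference in WRMSR payloads: ucode_load_amd() above hands
 * the MSR the address of the whole ucode_file_amd_t (header included),
 * while the native path in ucode_load_intel() below writes the address
 * of the update body only; the header and extended signature table are
 * needed by the hypervisor path but not by the bare-metal MSR write.
 */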
/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
#ifdef	__xpv
	uint32_t ext_offset;
	uint32_t body_size;
	uint32_t ext_size;
	uint8_t *ustart;
	uint32_t usize;
	ucode_update_t uus;
#endif

	ASSERT(ucode);

#ifdef	__xpv
	/*
	 * The hypervisor wants the header, data, and extended signature
	 * tables.  Since we can only get here from the boot CPU (cpu #0),
	 * we don't need to free: ucode_zalloc() will use BOP_ALLOC().
	 */
	usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	ustart = ucode_zalloc(cp->cpu_id, usize);
	ASSERT(ustart);

	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
	ext_size = usize - ext_offset;
	ASSERT(ext_size >= 0);

	(void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	(void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
	    body_size);
	if (ext_size > 0) {
		(void) memcpy(&ustart[ext_offset],
		    ucodefp->uf_ext_table, ext_size);
	}
	uus.ucodep = ustart;
	uus.usize = usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;
#else
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
	ucode->read_rev(uinfop);
	kpreempt_enable();
#endif

	return (ucodefp->uf_header->uh_rev);
}


#ifdef	__xpv
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;

	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
	op.u.microcode.length = uusp->usize;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	kpreempt_enable();
}
#endif	/* __xpv */

static void
ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
{
	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
}

static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architectures Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, and
	 * that cpuid then be executed, to guarantee a correct reading of
	 * this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}
static ucode_errno_t
ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
#ifndef	__xpv
	uint32_t *ptr = (uint32_t *)ucodep;
	ucode_eqtbl_amd_t *eqtbl;
	ucode_file_amd_t *ufp;
	int count;
	int higher = 0;
	ucode_errno_t rc = EM_NOMATCH;
	uint16_t eq_sig;

	/* skip over magic number & equivalence table header */
	ptr += 2; size -= 8;

	count = *ptr++; size -= 4;
	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
	    eqtbl++)
		;

	eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (eq_sig == 0)
		return (EM_HIGHERREV);

	/* Use the first microcode patch that matches. */
	do {
		ptr += count >> 2; size -= count;

		if (!size)
			return (higher ? EM_HIGHERREV : EM_NOMATCH);

		ptr++; size -= 4;
		count = *ptr++; size -= 4;
		ufp = (ucode_file_amd_t *)ptr;

		rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
		if (rc == EM_HIGHERREV)
			higher = 1;
	} while (rc != EM_OK);

	uusp->ucodep = (uint8_t *)ufp;
	uusp->usize = count;
	uusp->expected_rev = ufp->uf_header.uh_patch_id;
#else
	/*
	 * The hypervisor will choose the patch to load, so there is no way to
	 * know the "expected revision" in advance.  This is especially true
	 * on mixed-revision systems where more than one patch will be loaded.
	 */
	uusp->expected_rev = 0;
	uusp->ucodep = ucodep;
	uusp->usize = size;

	ucode_chipset_amd(ucodep, size);
#endif

	return (EM_OK);
}

static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t header_size = UCODE_HEADER_SIZE_INTEL;
	int remaining;
	int found = 0;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int total_size, body_size, ext_size;
		uint8_t *curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we are searching through a big file
		 * containing microcode for pretty much all the
		 * processors, we are bound to get EM_NOMATCH
		 * at one point.  However, if we return
		 * EM_NOMATCH to users, it will really confuse
		 * them.  Therefore, if we ever find a match of
		 * a lower rev, we will set the return code to
		 * EM_HIGHERREV.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
#ifndef	__xpv
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
#else
			uusp->ucodep = (uint8_t *)curbuf;
#endif
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}
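/*
 * A sketch of the driver-path flow (see ucode_update() below):
 * ucode_drv passes in the raw microcode image; for each xcall-ready
 * CPU, ucode->extract() picks the matching patch out of that buffer,
 * then ucode_write() is cross-called onto the CPU to perform the WRMSR
 * and re-read the revision.  The extraction result is cached so that
 * sibling CPUs with identical signature and platform id skip the
 * search.
 */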
/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int found = 0;
	processorid_t id;
	ucode_update_t cached = { 0 };
	ucode_update_t *cachedp = NULL;
	ucode_errno_t rc = EM_OK;
	ucode_errno_t search_rc = EM_NOMATCH;	/* search result */
	cpuset_t cpuset;

	ASSERT(ucode);
	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

#ifdef	__xpv
		/*
		 * For i86xpv, the hypervisor will update all the CPUs.
		 * The hypervisor wants the header, data, and extended
		 * signature tables.  ucode_write will just read in the
		 * updated version on all the CPUs after the update has
		 * completed.
		 */
		if (id == 0) {
			ucode_load_xpv(uusp);
		}
#endif

		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
		    !ucode_force_update) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found)
		rc = search_rc;

	return (rc);
}
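/*
 * Interpreting the per-CPU result in ucode_update() above: a non-zero
 * new_rev equal to the previously running revision (without the force
 * flag) means the CPU was already current and is reported as
 * EM_HIGHERREV; a new_rev of zero, or one that differs from the
 * revision the patch advertised, is reported as EM_UPDATE via
 * ucode_failure_fmt.
 */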
/*
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from boot path where pointer to CPU structure
 * is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	uint32_t new_rev = 0;

	ASSERT(cp);
	/*
	 * Space statically allocated for BSP, ensure pointer is set
	 */
	if (cp->cpu_id == 0 && cp->cpu_m.mcpu_ucode_info == NULL)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	/* set up function pointers if not already done */
	if (!ucode)
		switch (cpuid_getvendor(cp)) {
		case X86_VENDOR_AMD:
			ucode = &ucode_amd;
			break;
		case X86_VENDOR_Intel:
			ucode = &ucode_intel;
			break;
		default:
			ucode = NULL;
			return;
		}

	if (!ucode->capable(cp))
		return;

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode->read_rev(uinfop);

#ifdef	__xpv
	/*
	 * For i86xpv, the hypervisor will update all the CPUs.  We only need
	 * to do this on one of the CPUs (and there always is a CPU 0).
	 */
	if (cp->cpu_id != 0) {
		return;
	}
#endif

	/*
	 * Check to see if we need a ucode update.
	 */
	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
		new_rev = ucode->load(&ucodefile, uinfop, cp);

		if (uinfop->cui_rev != new_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, new_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode->file_reset(&ucodefile, cp->cpu_id);
}

/*
 * Returns the microcode revision of each CPU, taken from the machcpu
 * structure, in the revp array, which the caller must size to hold at
 * least max_ncpus entries.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(ucode);
	ASSERT(revp);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}