/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/asm_linkage.h>
#include <sys/bootconf.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/machsystm.h>
#include <sys/param.h>
#include <sys/machparam.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>

/*
 * Microcode specific information per core.
 */
struct cpu_ucode_info {
	uint32_t	cui_platid;	/* platform id */
	uint32_t	cui_rev;	/* microcode revision */
};

/*
 * Data structure passed (as arg1) to the ucode_write() cross-call.
 */
struct ucode_update_struct {
	uint32_t	sig;		/* CPU signature (cpuid_getsig()) */
	struct cpu_ucode_info	info;	/* ucode info snapshot for the CPU */
	uint32_t	expected_rev;	/* revision the update should reach */
	uint32_t	new_rev;	/* revision read back after the write */
	uint8_t		*ucodep;	/* pointer to ucode body */
};

/* 69 * mcpu_ucode_info for the boot CPU. Statically allocated. 70 */ 71 static struct cpu_ucode_info cpu_ucode_info0; 72 73 static ucode_file_t ucodefile = { 0 }; 74 75 static int ucode_capable(cpu_t *); 76 static void ucode_file_reset(ucode_file_t *, processorid_t); 77 static ucode_errno_t ucode_match(int, struct cpu_ucode_info *, 78 ucode_header_t *, ucode_ext_table_t *); 79 static ucode_errno_t ucode_locate(cpu_t *, struct cpu_ucode_info *, 80 ucode_file_t *); 81 static void ucode_update_intel(uint8_t *, struct cpu_ucode_info *); 82 static void ucode_read_rev(struct cpu_ucode_info *); 83 84 static const char ucode_failure_fmt[] = 85 "cpu%d: failed to update microcode code from version 0x%x to 0x%x\n"; 86 static const char ucode_success_fmt[] = 87 "?cpu%d: microcode code has been updated from version 0x%x to 0x%x\n"; 88 89 /* 90 * Force flag. If set, the first microcode binary that matches 91 * signature and platform id will be used for microcode update, 92 * regardless of version. Should only be used for debugging. 93 */ 94 int ucode_force_update = 0; 95 96 /* 97 * Allocate space for mcpu_ucode_info in the machcpu structure 98 * for all non-boot CPUs. 99 */ 100 void 101 ucode_alloc_space(cpu_t *cp) 102 { 103 ASSERT(cp->cpu_id != 0); 104 cp->cpu_m.mcpu_ucode_info = 105 kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP); 106 } 107 108 void 109 ucode_free_space(cpu_t *cp) 110 { 111 ASSERT(cp->cpu_id != 0); 112 kmem_free(cp->cpu_m.mcpu_ucode_info, 113 sizeof (*cp->cpu_m.mcpu_ucode_info)); 114 } 115 116 /* 117 * Called when we are done with microcode update on all processors to free up 118 * space allocated for the microcode file. 119 */ 120 void 121 ucode_free() 122 { 123 ucode_file_reset(&ucodefile, -1); 124 } 125 126 /* 127 * Check whether or not a processor is capable of microcode operations 128 * Returns 1 if it is capable, 0 if not. 
 */
static int
ucode_capable(cpu_t *cp)
{
	/*
	 * At this point we only support microcode update for Intel
	 * processors family 6 and above.
	 *
	 * We also assume that we don't support a mix of Intel and
	 * AMD processors in the same box.
	 */
	if (cpuid_getvendor(cp) != X86_VENDOR_Intel ||
	    cpuid_getfamily(cp) < 6)
		return (0);
	else
		return (1);
}

/*
 * Called when it is no longer necessary to keep the microcode around,
 * or when the cached microcode doesn't match the CPU being processed.
 * Frees the cached body and extended signature table (for non-boot CPUs
 * only; id == 0 buffers came from BOP_ALLOC() and are deliberately
 * leaked to boot scratch memory) and zeroes the cached header so a
 * subsequent ucode_match() against it fails.
 */
static void
ucode_file_reset(ucode_file_t *ucodefp, processorid_t id)
{
	int total_size, body_size;

	if (ucodefp == NULL)
		return;

	/* Sizes must be computed from the header before it is zeroed below. */
	total_size = UCODE_TOTAL_SIZE(ucodefp->uf_header.uh_total_size);
	body_size = UCODE_BODY_SIZE(ucodefp->uf_header.uh_body_size);
	if (ucodefp->uf_body) {
		/*
		 * Space for the boot CPU is allocated with BOP_ALLOC()
		 * and does not require a free.
		 */
		if (id != 0)
			kmem_free(ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		/* Extended table occupies whatever follows header + body. */
		int size = total_size - body_size - UCODE_HEADER_SIZE;
		/*
		 * Space for the boot CPU is allocated with BOP_ALLOC()
		 * and does not require a free.
		 */
		if (id != 0)
			kmem_free(ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	bzero(&ucodefp->uf_header, UCODE_HEADER_SIZE);
}

/*
 * Populate the ucode file structure from microcode file corresponding to
 * this CPU, if exists.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
static ucode_errno_t
ucode_locate(cpu_t *cp, struct cpu_ucode_info *uinfop, ucode_file_t *ucodefp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int header_size = UCODE_HEADER_SIZE;
	int cpi_sig = cpuid_getsig(cp);
	ucode_errno_t rc = EM_OK;

	/*
	 * If the cached microcode already matches the CPU we are
	 * processing, use it without touching the filesystem.
	 */
	if (ucode_match(cpi_sig, uinfop, &ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name:
	 * /<install-path>/<vendor>/<signature>-<platform-id>.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode_file_reset(ucodefp, cp->cpu_id);

	count = kobj_read(fd, (char *)&ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE: {

		ucode_header_t *uhp = &ucodefp->uf_header;
		uint32_t offset = header_size;
		int total_size, body_size, ext_size;
		uint32_t sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE(uhp->uh_body_size);
			if (cp->cpu_id != 0) {
				/* Non-boot CPU: normal kmem allocation. */
				if ((ucodefp->uf_body = kmem_zalloc(body_size,
				    KM_NOSLEEP)) == NULL) {
					rc = EM_NOMEM;
					break;
				}
			} else {
				/*
				 * BOP_ALLOC() failure results in panic so we
				 * don't have to check for NULL return.
				 */
				ucodefp->uf_body =
				    (uint8_t *)BOP_ALLOC(bootops,
				    NULL, body_size, MMU_PAGESIZE);
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		/*
		 * Header and body must checksum to zero together; a
		 * non-zero result means the file is corrupt.
		 */
		sum = ucode_checksum(0, header_size,
		    (uint8_t *)&ucodefp->uf_header);
		if (ucode_checksum(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		if (cp->cpu_id != 0) {
			if ((ucodefp->uf_ext_table = kmem_zalloc(ext_size,
			    KM_NOSLEEP)) == NULL) {
				rc = EM_NOMEM;
				break;
			}
		} else {
			/*
			 * BOP_ALLOC() failure results in panic so we
			 * don't have to check for NULL return.
			 */
			ucodefp->uf_ext_table =
			    (ucode_ext_table_t *)BOP_ALLOC(bootops, NULL,
			    ext_size, MMU_PAGESIZE);
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			/*
			 * Each extended signature entry must also checksum
			 * to zero individually.
			 */
			ext_size -= UCODE_EXT_TABLE_SIZE;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum(0, UCODE_EXT_SIG_SIZE,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		/* Short read or read error on the header. */
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	/* Confirm the freshly read file really matches this CPU. */
	rc = ucode_match(cpi_sig, uinfop, &ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}


/*
 * Returns EM_OK if the header (or one of its extended signatures) matches
 * this CPU's signature and platform id and carries a newer revision than
 * the running one (or ucode_force_update is set); EM_HIGHERREV if it
 * matches but the running revision is already current; EM_NOMATCH
 * otherwise.
 */
static ucode_errno_t
ucode_match(int cpi_sig, struct cpu_ucode_info *uinfop,
    ucode_header_t *uhp, ucode_ext_table_t *uetp)
{
	ASSERT(uhp);

	/* First try the primary signature in the header. */
	if (UCODE_MATCH(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	/* Then fall back to the extended signature table, if any. */
	if (uetp != NULL) {
		int i;

		for (i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}

/*
 * Cross-call handler: apply the microcode in *uusp on the CPU this runs
 * on, then record the resulting revision in uusp->new_rev.  Always
 * returns 0 (the xcall return value is unused).
 */
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	struct ucode_update_struct *uusp = (struct ucode_update_struct *)arg1;
	struct cpu_ucode_info *uinfop = CPU->cpu_m.mcpu_ucode_info;

	ASSERT(uusp->ucodep);

	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode_read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	/* Trigger the update by writing the body address to the MSR. */
	wrmsr(MSR_INTC_UCODE_WRITE,
	    (uint64_t)(intptr_t)(uusp->ucodep));
	ucode_read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}


/*
 * Apply ucode_body on the current CPU (boot path; no xcall) and refresh
 * *uinfop with the revision read back.  Preemption is disabled so the
 * MSR write and revision read happen on the same CPU.
 */
static void
ucode_update_intel(uint8_t *ucode_body, struct cpu_ucode_info *uinfop)
{
	kpreempt_disable();
	wrmsr(MSR_INTC_UCODE_WRITE, (uint64_t)(uintptr_t)ucode_body);
	ucode_read_rev(uinfop);
	kpreempt_enable();
}

/*
 * Read the current microcode revision of the executing CPU into
 * uinfop->cui_rev.
 */
static void
ucode_read_rev(struct cpu_ucode_info *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}

/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * ucodep points to a driver-supplied buffer of size bytes that may hold
 * multiple concatenated microcode files.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	uint32_t header_size = UCODE_HEADER_SIZE;
	int remaining;
	int found = 0;
	processorid_t id;
	struct ucode_update_struct cached = { 0 };
	struct ucode_update_struct *cachedp = NULL;
	ucode_errno_t rc = EM_OK;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucodep);

	CPUSET_ZERO(cpuset);

	if (!ucode_capable(CPU))
		return (EM_NOTSUP);

	/* cpu_lock keeps the CPU list stable across the whole walk. */
	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		struct ucode_update_struct uus = { 0 };
		struct ucode_update_struct *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is. But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else {
			/*
			 * Go through the whole buffer in case there are
			 * multiple versions of matching microcode for this
			 * processor.
			 */
			for (remaining = size; remaining > 0; ) {
				int total_size, body_size, ext_size;
				uint8_t *curbuf = &ucodep[size - remaining];
				ucode_header_t *uhp = (ucode_header_t *)curbuf;
				ucode_ext_table_t *uetp = NULL;
				ucode_errno_t tmprc;

				total_size =
				    UCODE_TOTAL_SIZE(uhp->uh_total_size);
				body_size = UCODE_BODY_SIZE(uhp->uh_body_size);
				ext_size = total_size -
				    (header_size + body_size);

				if (ext_size > 0)
					uetp = (ucode_ext_table_t *)
					    &curbuf[header_size + body_size];

				tmprc = ucode_match(uusp->sig, &uusp->info,
				    uhp, uetp);

				/*
				 * Since we are searching through a big file
				 * containing microcode for pretty much all the
				 * processors, we are bound to get EM_NOMATCH
				 * at one point. However, if we return
				 * EM_NOMATCH to users, it will really confuse
				 * them. Therefore, if we ever find a match of
				 * a lower rev, we will set return code to
				 * EM_HIGHERREV.
				 */
				if (tmprc == EM_HIGHERREV)
					search_rc = EM_HIGHERREV;

				/* Keep the highest-revision match seen. */
				if (tmprc == EM_OK &&
				    uusp->expected_rev < uhp->uh_rev) {
					uusp->ucodep = &curbuf[header_size];
					uusp->expected_rev = uhp->uh_rev;
					bcopy(uusp, &cached, sizeof (cached));
					cachedp = &cached;
					found = 1;
				}

				remaining -= total_size;
			}
		}

		/* Nothing to do: no matching microcode for this CPU. */
		if (uusp->ucodep == NULL)
			continue;

		/*
		 * Cross-call this one CPU to do the actual MSR write, then
		 * compare the revision read back against the expected one.
		 */
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, X_CALL_HIPRI, cpuset,
		    ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->expected_rev == uusp->new_rev) {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
		} else {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		}
	}

	mutex_exit(&cpu_lock);

	/* No match found anywhere: report the search outcome instead. */
	if (!found)
		rc = search_rc;

	return (rc);
}

/*
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from boot path where pointer to CPU structure
 * is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
/*
 * NOTE(review): the block below is a development placeholder and will not
 * compile if __xpv is ever defined -- confirm xen port status before
 * enabling that build.
 */
#ifdef __xpv
	{
		This needs to be ported. Only do ucode update from dom0. In
		addition figure out how to bind to physical CPUs when doing
		it in dom0.
	}
#endif /* __xpv */

	struct cpu_ucode_info *uinfop;
	ucode_errno_t rc = EM_OK;

	ASSERT(cp);
	/* Boot CPU uses the statically allocated info structure. */
	if (cp->cpu_id == 0)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	if (!ucode_capable(cp))
		return;

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6)) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode_read_rev(uinfop);

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode_locate(cp, uinfop, &ucodefile)) == EM_OK) {
		ucode_update_intel(ucodefile.uf_body, uinfop);

		/* Revision read back must match the file header's. */
		if (uinfop->cui_rev != ucodefile.uf_header.uh_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, ucodefile.uf_header.uh_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory. We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode_file_reset(&ucodefile, cp->cpu_id);
}

/*
 * Returns microcode revision from the machcpu structure.
 *
 * revp must point to an array with at least max_ncpus entries; the entry
 * for each existing CPU is filled with its cached revision.  Returns
 * EM_OK, or EM_NOTSUP if this platform cannot do microcode operations.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(revp);

	if (!ucode_capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		/* Slots for nonexistent CPUs are left untouched. */
		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}