// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD CPU Microcode Update Driver for Linux
 *
 * This driver allows upgrading microcode on F10h AMD
 * CPUs and later.
 *
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	         2013-2018 Borislav Petkov <bp@alien8.de>
 *
 * Author: Peter Oruba <peter.oruba@amd.com>
 *
 * Based on work by:
 * Tigran Aivazian <aivazian.tigran@gmail.com>
 *
 * early loader:
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 * Fixes: Borislav Petkov <bp@suse.de>
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>

#include "internal.h"

struct ucode_patch {
	struct list_head plist;
	void *data;
	unsigned int size;
	u32 patch_id;
	u16 equiv_cpu;
};

static LIST_HEAD(microcode_cache);

#define UCODE_MAGIC			0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
#define UCODE_UCODE_TYPE		0x00000001

#define SECTION_HDR_SIZE	8
#define CONTAINER_HDR_SZ	12

struct equiv_cpu_entry {
	u32 installed_cpu;
	u32 fixed_errata_mask;
	u32 fixed_errata_compare;
	u16 equiv_cpu;
	u16 res;
} __packed;

struct microcode_header_amd {
	u32 data_code;
	u32 patch_id;
	u16 mc_patch_data_id;
	u8  mc_patch_data_len;
	u8  init_flag;
	u32 mc_patch_data_checksum;
	u32 nb_dev_id;
	u32 sb_dev_id;
	u16 processor_rev_id;
	u8  nb_rev_id;
	u8  sb_rev_id;
	u8  bios_api_rev;
	u8  reserved1[3];
	u32 match_reg[8];
} __packed;

struct microcode_amd {
	struct microcode_header_amd hdr;
	unsigned int mpb[];
};

#define PATCH_MAX_SIZE (3 * PAGE_SIZE)

static struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
} equiv_table;

/*
 * This points to the current valid container of microcode patches which we
 * will save from the initrd/builtin before jettisoning its contents. @mc is
 * the microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;
	u32 cpuid_1_eax;
	u32 psize;
	u8 *data;
	size_t size;
};

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/arch/x86/microcode.rst
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

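/*
 * Scan the equivalence table @et for an entry whose installed_cpu field
 * matches the CPUID signature @sig and return its equivalence ID, or 0 if
 * the signature is not listed.
 */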
static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
	unsigned int i;

	if (!et || !et->num_entries)
		return 0;

	for (i = 0; i < et->num_entries; i++) {
		struct equiv_cpu_entry *e = &et->entry[i];

		if (sig == e->installed_cpu)
			return e->equiv_cpu;
	}
	return 0;
}

/*
 * Check whether there is a valid microcode container file at the beginning
 * of @buf of size @buf_size.
 */
static bool verify_container(const u8 *buf, size_t buf_size)
{
	u32 cont_magic;

	if (buf_size <= CONTAINER_HDR_SZ) {
		pr_debug("Truncated microcode container header.\n");
		return false;
	}

	cont_magic = *(const u32 *)buf;
	if (cont_magic != UCODE_MAGIC) {
		pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated CPU equivalence table at the
 * beginning of @buf of size @buf_size.
 */
static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
{
	const u32 *hdr = (const u32 *)buf;
	u32 cont_type, equiv_tbl_len;

	if (!verify_container(buf, buf_size))
		return false;

	cont_type = hdr[1];
	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
		pr_debug("Wrong microcode container equivalence table type: %u.\n",
			 cont_type);
		return false;
	}

	buf_size -= CONTAINER_HDR_SZ;

	equiv_tbl_len = hdr[2];
	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
	    buf_size < equiv_tbl_len) {
		pr_debug("Truncated equivalence table.\n");
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated microcode patch section at the
 * beginning of @buf of size @buf_size.
 *
 * On success, the patch size read from the section header is returned to the
 * caller in @sh_psize.
 */
static bool
__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
	u32 p_type, p_size;
	const u32 *hdr;

	if (buf_size < SECTION_HDR_SIZE) {
		pr_debug("Truncated patch section.\n");
		return false;
	}

	hdr = (const u32 *)buf;
	p_type = hdr[0];
	p_size = hdr[1];

	if (p_type != UCODE_UCODE_TYPE) {
		pr_debug("Invalid type field (0x%x) in container file section header.\n",
			 p_type);
		return false;
	}

	if (p_size < sizeof(struct microcode_header_amd)) {
		pr_debug("Patch of size %u too short.\n", p_size);
		return false;
	}

	*sh_psize = p_size;

	return true;
}

/*
 * Check whether the passed remaining file @buf_size is large enough to contain
 * a patch of the indicated @sh_psize (and also whether this size does not
 * exceed the per-family maximum). @sh_psize is the size read from the section
 * header.
 */
static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
{
	u32 max_size;

	if (family >= 0x15)
		return min_t(u32, sh_psize, buf_size);

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824

	switch (family) {
	case 0x10 ... 0x12:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	default:
		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
		return 0;
	}

	if (sh_psize > min_t(u32, buf_size, max_size))
		return 0;

	return sh_psize;
}

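/*
 * A patch section, as parsed below, is an 8-byte header (a u32 type of
 * UCODE_UCODE_TYPE followed by a u32 patch size) and then the patch itself,
 * which starts with struct microcode_header_amd.
 */
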
/*
 * Verify the patch in @buf.
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */
static int
verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	unsigned int ret;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize))
		return -1;

	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length so we need to subtract
	 * it before passing this value to the function below.
	 */
	buf_size -= SECTION_HDR_SIZE;

	/*
	 * Check if the remaining buffer is big enough to contain a patch of
	 * size sh_psize, as the section claims.
	 */
	if (buf_size < sh_psize) {
		pr_debug("Patch of size %u truncated.\n", sh_psize);
		return -1;
	}

	ret = __verify_patch_size(family, sh_psize, buf_size);
	if (!ret) {
		pr_debug("Per-family patch size mismatch.\n");
		return -1;
	}

	*patch_size = sh_psize;

	mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	proc_id = mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);
	if (patch_fam != family)
		return 1;

	return 0;
}

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 *
 * Returns the number of bytes consumed while scanning, or 0 if the container
 * holding a matching patch has been found. In the latter case, @desc contains
 * all the data we're going to use in later stages of the application.
 */
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size))
		return 0;

	buf = ucode;

	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);

	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, desc->cpuid_1_eax);

	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size);
		if (ret < 0) {
			/*
			 * Patch verification failed, skip to the next container, if
			 * there is one. Before exit, check whether that container has
			 * found a patch already. If so, use it.
			 */
			goto out;
		} else if (ret > 0) {
			goto skip;
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
		if (eq_id == mc->hdr.processor_rev_id) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

skip:
		/* Skip patch section header too: */
		buf  += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}

out:
	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

	return orig_size - size;
}

/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	while (size) {
		size_t s = parse_container(ucode, size, desc);
		if (!s)
			return;

		/* catch wraparound */
		if (size >= s) {
			ucode += s;
			size  -= s;
		} else {
			return;
		}
	}
}

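/*
 * Point MSR_AMD64_PATCH_LOADER at the patch and read MSR_AMD64_PATCH_LEVEL
 * back to check that the CPU now reports the new patch_id. Returns 0 on
 * success, -1 if the reported revision does not match.
 */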
static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if a matching patch was found and applied successfully.
 */
static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size)
{
	struct cont_desc desc = { 0 };
	struct microcode_amd *mc;
	bool ret = false;

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(ucode, size, &desc);

	mc = desc.mc;
	if (!mc)
		return ret;

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (old_rev > mc->hdr.patch_id)
		return ret;

	return !__apply_microcode_amd(mc);
}

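/*
 * Look for microcode linked into the kernel image itself (e.g. via
 * CONFIG_EXTRA_FIRMWARE). Families 0x15 and later use a per-family file
 * name; this path is not used on 32-bit.
 */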
static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct firmware fw;

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%02hhxh.bin", family);

	if (firmware_request_builtin(&fw, fw_name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}

	return false;
}

static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
	struct cpio_data cp;

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd(ucode_path);

	*ret = cp;
}

void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cpio_data cp = { };
	u32 dummy;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

	find_blobs_in_containers(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return;

	if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size))
		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
}

static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);

static int __init save_microcode_in_initrd(void)
{
	unsigned int cpuid_1_eax = native_cpuid_eax(1);
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	enum ucode_state ret;
	struct cpio_data cp;

	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	find_blobs_in_containers(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return -EINVAL;

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}
early_initcall(save_microcode_in_initrd);

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;
	return NULL;
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

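/*
 * Find a cached patch for @cpu: translate its CPUID signature into an
 * equivalence ID and look that ID up in the patch cache.
 */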
static struct ucode_patch *find_patch(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u16 equiv_id;

	equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

void reload_ucode_amd(unsigned int cpu)
{
	u32 rev, dummy __always_unused;
	struct microcode_amd *mc;
	struct ucode_patch *p;

	p = find_patch(cpu);
	if (!p)
		return;

	mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	if (rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc))
			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
	}
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	return 0;
}

static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev, dummy __always_unused;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	mc_amd = p->data;
	uci->mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* need to apply patch? */
	if (rev > mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
		       cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}

void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	unsigned int cpu = smp_processor_id();

	ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
	apply_microcode_amd(cpu);
}

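/*
 * Copy the equivalence table at the start of @buf into vmalloc()ed memory
 * (equiv_table) for later lookups via find_equiv_id(). Returns the number of
 * bytes consumed (table length plus container header), or 0 on error.
 */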
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	vfree(equiv_table.entry);
	memset(&equiv_table, 0, sizeof(equiv_table));
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error such as a failed memory allocation and the driver
 * cannot continue functioning normally. In such cases, we tear down
 * everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	ret = verify_patch(family, fw, leftover, patch_size);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}
	patch->size = *patch_size;

	mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return 0;
}

/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw += offset;
	size -= offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		fw += crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}

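/*
 * Rebuild the equivalence table and patch cache from @data. Returns
 * UCODE_NEW when the cache now holds a patch newer than the microcode
 * revision currently reported on at least one node, UCODE_OK otherwise,
 * or an error state if the blob could not be parsed.
 */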
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	struct cpuinfo_x86 *c;
	unsigned int nid, cpu;
	struct ucode_patch *p;
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK) {
		cleanup();
		return ret;
	}

	for_each_node(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));
		c = &cpu_data(cpu);

		p = find_patch(cpu);
		if (!p)
			continue;

		if (c->microcode >= p->patch_id)
			continue;

		ret = UCODE_NEW;
	}

	return ret;
}

/*
 * AMD microcode firmware naming convention: up to and including family 14h,
 * microcode is in the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, it is in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	if (force_minrev)
		return UCODE_NFOUND;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (!verify_container(fw->data, fw->size))
		goto fw_release;

	ret = load_microcode_amd(c->x86, fw->data, fw->size);

fw_release:
	release_firmware(fw);

out:
	return ret;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_fw = request_microcode_amd,
	.collect_cpu_info     = collect_cpu_info_amd,
	.apply_microcode      = apply_microcode_amd,
	.microcode_fini_cpu   = microcode_fini_cpu_amd,
	.nmi_safe             = true,
};

struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}
	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}