xref: /linux/arch/x86/kernel/cpu/microcode/amd.c (revision 4d872d51bc9d7b899c1f61534e3dbde72613f627)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  AMD CPU Microcode Update Driver for Linux
4  *
5  *  This driver allows to upgrade microcode on F10h AMD
6  *  CPUs and later.
7  *
8  *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
9  *	          2013-2018 Borislav Petkov <bp@alien8.de>
10  *
11  *  Author: Peter Oruba <peter.oruba@amd.com>
12  *
13  *  Based on work by:
14  *  Tigran Aivazian <aivazian.tigran@gmail.com>
15  *
16  *  early loader:
17  *  Copyright (C) 2013 Advanced Micro Devices, Inc.
18  *
19  *  Author: Jacob Shin <jacob.shin@amd.com>
20  *  Fixes: Borislav Petkov <bp@suse.de>
21  */
22 #define pr_fmt(fmt) "microcode: " fmt
23 
24 #include <linux/earlycpio.h>
25 #include <linux/firmware.h>
26 #include <linux/bsearch.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/initrd.h>
30 #include <linux/kernel.h>
31 #include <linux/pci.h>
32 
33 #include <crypto/sha2.h>
34 
35 #include <asm/microcode.h>
36 #include <asm/processor.h>
37 #include <asm/cmdline.h>
38 #include <asm/setup.h>
39 #include <asm/cpu.h>
40 #include <asm/msr.h>
41 #include <asm/tlb.h>
42 
43 #include "internal.h"
44 
/* One cached microcode patch, kept on the file-local microcode_cache list. */
struct ucode_patch {
	struct list_head plist;		/* linkage into microcode_cache */
	void *data;			/* kmemdup'd patch payload (owned) */
	unsigned int size;		/* payload size in bytes */
	u32 patch_id;			/* patch revision from the header */
	u16 equiv_cpu;			/* pre-Zen equivalence table ID */
};
52 
53 static LIST_HEAD(microcode_cache);
54 
55 #define UCODE_MAGIC			0x00414d44
56 #define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
57 #define UCODE_UCODE_TYPE		0x00000001
58 
59 #define SECTION_HDR_SIZE		8
60 #define CONTAINER_HDR_SZ		12
61 
/* One row of the container's CPU equivalence table (pre-Zen only). */
struct equiv_cpu_entry {
	u32	installed_cpu;		/* CPUID(1).EAX this row applies to */
	u32	fixed_errata_mask;
	u32	fixed_errata_compare;
	u16	equiv_cpu;		/* ID used to match patch sections */
	u16	res;
} __packed;

/* On-disk layout of an AMD microcode patch header. */
struct microcode_header_amd {
	u32	data_code;
	u32	patch_id;		/* patch revision */
	u16	mc_patch_data_id;
	u8	mc_patch_data_len;
	u8	init_flag;
	u32	mc_patch_data_checksum;
	u32	nb_dev_id;		/* non-zero: chipset-specific (unsupported) */
	u32	sb_dev_id;		/* non-zero: chipset-specific (unsupported) */
	u16	processor_rev_id;	/* matched against equiv_cpu */
	u8	nb_rev_id;
	u8	sb_rev_id;
	u8	bios_api_rev;
	u8	reserved1[3];
	u32	match_reg[8];
} __packed;

/* Header followed by the variable-length microcode patch block. */
struct microcode_amd {
	struct microcode_header_amd	hdr;
	unsigned int			mpb[];
};

/* The parsed equivalence table of the currently loaded container. */
static struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
} equiv_table;
96 
/*
 * On Zen and newer the patch revision ID encodes the f/m/s of the CPU
 * the patch is destined for; this union decomposes that encoding.
 */
union zen_patch_rev {
	struct {
		__u32 rev	 : 8,
		      stepping	 : 4,
		      model	 : 4,
		      __reserved : 4,
		      ext_model	 : 4,
		      ext_fam	 : 8;
	};
	__u32 ucode_rev;
};

/* Bitfield view of CPUID(1).EAX (family/model/stepping). */
union cpuid_1_eax {
	struct {
		__u32 stepping    : 4,
		      model	  : 4,
		      family	  : 4,
		      __reserved0 : 4,
		      ext_model   : 4,
		      ext_fam     : 8,
		      __reserved1 : 4;
	};
	__u32 full;
};
121 
122 /*
123  * This points to the current valid container of microcode patches which we will
124  * save from the initrd/builtin before jettisoning its contents. @mc is the
125  * microcode patch we found to match.
126  */
struct cont_desc {
	struct microcode_amd *mc;	/* matching patch found in the container */
	u32		     psize;	/* size of that patch */
	u8		     *data;	/* start of the matching container */
	size_t		     size;	/* size of the matching container */
};
133 
134 /*
135  * Microcode patch container file is prepended to the initrd in cpio
136  * format. See Documentation/arch/x86/microcode.rst
137  */
138 static const char
139 ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
140 
141 /*
142  * This is CPUID(1).EAX on the BSP. It is used in two ways:
143  *
144  * 1. To ignore the equivalence table on Zen1 and newer.
145  *
146  * 2. To match which patches to load because the patch revision ID
147  *    already contains the f/m/s for which the microcode is destined
148  *    for.
149  */
150 static u32 bsp_cpuid_1_eax __ro_after_init;
151 
/* SHA256 verification of blobs; can be disabled on the command line (tainting). */
static bool sha_check = true;

/* Maps a patch revision ID to the expected SHA256 of its blob. */
struct patch_digest {
	u32 patch_id;
	u8 sha256[SHA256_DIGEST_SIZE];
};
158 
159 #include "amd_shas.c"
160 
cmp_id(const void * key,const void * elem)161 static int cmp_id(const void *key, const void *elem)
162 {
163 	struct patch_digest *pd = (struct patch_digest *)elem;
164 	u32 patch_id = *(u32 *)key;
165 
166 	if (patch_id == pd->patch_id)
167 		return 0;
168 	else if (patch_id < pd->patch_id)
169 		return -1;
170 	else
171 		return 1;
172 }
173 
need_sha_check(u32 cur_rev)174 static bool need_sha_check(u32 cur_rev)
175 {
176 	switch (cur_rev >> 8) {
177 	case 0x80012: return cur_rev <= 0x800126f; break;
178 	case 0x80082: return cur_rev <= 0x800820f; break;
179 	case 0x83010: return cur_rev <= 0x830107c; break;
180 	case 0x86001: return cur_rev <= 0x860010e; break;
181 	case 0x86081: return cur_rev <= 0x8608108; break;
182 	case 0x87010: return cur_rev <= 0x8701034; break;
183 	case 0x8a000: return cur_rev <= 0x8a0000a; break;
184 	case 0xa0010: return cur_rev <= 0xa00107a; break;
185 	case 0xa0011: return cur_rev <= 0xa0011da; break;
186 	case 0xa0012: return cur_rev <= 0xa001243; break;
187 	case 0xa0082: return cur_rev <= 0xa00820e; break;
188 	case 0xa1011: return cur_rev <= 0xa101153; break;
189 	case 0xa1012: return cur_rev <= 0xa10124e; break;
190 	case 0xa1081: return cur_rev <= 0xa108109; break;
191 	case 0xa2010: return cur_rev <= 0xa20102f; break;
192 	case 0xa2012: return cur_rev <= 0xa201212; break;
193 	case 0xa4041: return cur_rev <= 0xa404109; break;
194 	case 0xa5000: return cur_rev <= 0xa500013; break;
195 	case 0xa6012: return cur_rev <= 0xa60120a; break;
196 	case 0xa7041: return cur_rev <= 0xa704109; break;
197 	case 0xa7052: return cur_rev <= 0xa705208; break;
198 	case 0xa7080: return cur_rev <= 0xa708009; break;
199 	case 0xa70c0: return cur_rev <= 0xa70C009; break;
200 	case 0xaa001: return cur_rev <= 0xaa00116; break;
201 	case 0xaa002: return cur_rev <= 0xaa00218; break;
202 	default: break;
203 	}
204 
205 	pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
206 	pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
207 	return true;
208 }
209 
/*
 * Verify the SHA256 digest of the microcode blob @data of length @len
 * against the known-good digest table for @patch_id.
 *
 * Returns true when the blob may be applied: either because the CPU
 * family is outside the Zen1..Zen4 (0x17..0x19) range that requires
 * checking, the current revision no longer needs it, checking was
 * disabled on the command line, or the digest actually matches.
 */
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
	struct patch_digest *pd = NULL;
	u8 digest[SHA256_DIGEST_SIZE];
	struct sha256_state s;
	int i;

	/* Only Zen1..Zen4 families carry digest entries. */
	if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
	    x86_family(bsp_cpuid_1_eax) > 0x19)
		return true;

	if (!need_sha_check(cur_rev))
		return true;

	/* Explicit opt-out via microcode.amd_sha_check=off. */
	if (!sha_check)
		return true;

	/* phashes is sorted by patch_id, see amd_shas.c. */
	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
	if (!pd) {
		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
		return false;
	}

	sha256_init(&s);
	sha256_update(&s, data, len);
	sha256_final(&s, digest);

	if (memcmp(digest, pd->sha256, sizeof(digest))) {
		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);

		/* Dump the computed digest to aid triage. */
		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
			pr_cont("0x%x ", digest[i]);
		pr_info("\n");

		return false;
	}

	return true;
}
249 
/* Read the currently applied microcode patch revision from the MSR. */
static u32 get_patch_level(void)
{
	u32 rev, dummy __always_unused;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	return rev;
}
258 
ucode_rev_to_cpuid(unsigned int val)259 static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
260 {
261 	union zen_patch_rev p;
262 	union cpuid_1_eax c;
263 
264 	p.ucode_rev = val;
265 	c.full = 0;
266 
267 	c.stepping  = p.stepping;
268 	c.model     = p.model;
269 	c.ext_model = p.ext_model;
270 	c.family    = 0xf;
271 	c.ext_fam   = p.ext_fam;
272 
273 	return c;
274 }
275 
find_equiv_id(struct equiv_cpu_table * et,u32 sig)276 static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
277 {
278 	unsigned int i;
279 
280 	/* Zen and newer do not need an equivalence table. */
281 	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
282 		return 0;
283 
284 	if (!et || !et->num_entries)
285 		return 0;
286 
287 	for (i = 0; i < et->num_entries; i++) {
288 		struct equiv_cpu_entry *e = &et->entry[i];
289 
290 		if (sig == e->installed_cpu)
291 			return e->equiv_cpu;
292 	}
293 	return 0;
294 }
295 
296 /*
297  * Check whether there is a valid microcode container file at the beginning
298  * of @buf of size @buf_size.
299  */
verify_container(const u8 * buf,size_t buf_size)300 static bool verify_container(const u8 *buf, size_t buf_size)
301 {
302 	u32 cont_magic;
303 
304 	if (buf_size <= CONTAINER_HDR_SZ) {
305 		pr_debug("Truncated microcode container header.\n");
306 		return false;
307 	}
308 
309 	cont_magic = *(const u32 *)buf;
310 	if (cont_magic != UCODE_MAGIC) {
311 		pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
312 		return false;
313 	}
314 
315 	return true;
316 }
317 
318 /*
319  * Check whether there is a valid, non-truncated CPU equivalence table at the
320  * beginning of @buf of size @buf_size.
321  */
verify_equivalence_table(const u8 * buf,size_t buf_size)322 static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
323 {
324 	const u32 *hdr = (const u32 *)buf;
325 	u32 cont_type, equiv_tbl_len;
326 
327 	if (!verify_container(buf, buf_size))
328 		return false;
329 
330 	/* Zen and newer do not need an equivalence table. */
331 	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
332 		return true;
333 
334 	cont_type = hdr[1];
335 	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
336 		pr_debug("Wrong microcode container equivalence table type: %u.\n",
337 			 cont_type);
338 		return false;
339 	}
340 
341 	buf_size -= CONTAINER_HDR_SZ;
342 
343 	equiv_tbl_len = hdr[2];
344 	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
345 	    buf_size < equiv_tbl_len) {
346 		pr_debug("Truncated equivalence table.\n");
347 		return false;
348 	}
349 
350 	return true;
351 }
352 
353 /*
354  * Check whether there is a valid, non-truncated microcode patch section at the
355  * beginning of @buf of size @buf_size.
356  *
357  * On success, @sh_psize returns the patch size according to the section header,
358  * to the caller.
359  */
__verify_patch_section(const u8 * buf,size_t buf_size,u32 * sh_psize)360 static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
361 {
362 	u32 p_type, p_size;
363 	const u32 *hdr;
364 
365 	if (buf_size < SECTION_HDR_SIZE) {
366 		pr_debug("Truncated patch section.\n");
367 		return false;
368 	}
369 
370 	hdr = (const u32 *)buf;
371 	p_type = hdr[0];
372 	p_size = hdr[1];
373 
374 	if (p_type != UCODE_UCODE_TYPE) {
375 		pr_debug("Invalid type field (0x%x) in container file section header.\n",
376 			 p_type);
377 		return false;
378 	}
379 
380 	if (p_size < sizeof(struct microcode_header_amd)) {
381 		pr_debug("Patch of size %u too short.\n", p_size);
382 		return false;
383 	}
384 
385 	*sh_psize = p_size;
386 
387 	return true;
388 }
389 
390 /*
391  * Check whether the passed remaining file @buf_size is large enough to contain
392  * a patch of the indicated @sh_psize (and also whether this size does not
393  * exceed the per-family maximum). @sh_psize is the size read from the section
394  * header.
395  */
__verify_patch_size(u32 sh_psize,size_t buf_size)396 static bool __verify_patch_size(u32 sh_psize, size_t buf_size)
397 {
398 	u8 family = x86_family(bsp_cpuid_1_eax);
399 	u32 max_size;
400 
401 	if (family >= 0x15)
402 		goto ret;
403 
404 #define F1XH_MPB_MAX_SIZE 2048
405 #define F14H_MPB_MAX_SIZE 1824
406 
407 	switch (family) {
408 	case 0x10 ... 0x12:
409 		max_size = F1XH_MPB_MAX_SIZE;
410 		break;
411 	case 0x14:
412 		max_size = F14H_MPB_MAX_SIZE;
413 		break;
414 	default:
415 		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
416 		return false;
417 	}
418 
419 	if (sh_psize > max_size)
420 		return false;
421 
422 ret:
423 	/* Working with the whole buffer so < is ok. */
424 	return sh_psize <= buf_size;
425 }
426 
427 /*
428  * Verify the patch in @buf.
429  *
430  * Returns:
431  * negative: on error
432  * positive: patch is not for this family, skip it
433  * 0: success
434  */
/*
 * Verify the patch in @buf.
 *
 * @buf:        start of a patch section (header included)
 * @buf_size:   bytes remaining in the file from @buf onwards
 * @patch_size: out: size announced by the section header
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */
static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct microcode_header_amd *mc_hdr;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize))
		return -1;

	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length so we need to subtract
	 * it before passing this value to the function below.
	 */
	buf_size -= SECTION_HDR_SIZE;

	/*
	 * Check if the remaining buffer is big enough to contain a patch of
	 * size sh_psize, as the section claims.
	 */
	if (buf_size < sh_psize) {
		pr_debug("Patch of size %u truncated.\n", sh_psize);
		return -1;
	}

	if (!__verify_patch_size(sh_psize, buf_size)) {
		pr_debug("Per-family patch size mismatch.\n");
		return -1;
	}

	/* Report the size even for patches subsequently skipped below. */
	*patch_size = sh_psize;

	mc_hdr	= (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	/* Chipset-specific (northbridge/southbridge) patches are not handled. */
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	/* The upper nibble-encoded family offset is relative to 0xf. */
	proc_id	= mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);
	if (patch_fam != family)
		return 1;

	return 0;
}
482 
mc_patch_matches(struct microcode_amd * mc,u16 eq_id)483 static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id)
484 {
485 	/* Zen and newer do not need an equivalence table. */
486 	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
487 		return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax;
488 	else
489 		return eq_id == mc->hdr.processor_rev_id;
490 }
491 
492 /*
493  * This scans the ucode blob for the proper container as we can have multiple
494  * containers glued together.
495  *
496  * Returns the amount of bytes consumed while scanning. @desc contains all the
497  * data we're going to use in later stages of the application.
498  */
/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 *
 * Returns the amount of bytes consumed while scanning. @desc contains all the
 * data we're going to use in later stages of the application.
 */
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size))
		return 0;

	buf = ucode;

	/* hdr[2] is the equivalence table length in bytes. */
	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);

	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);

	/* Advance past the container header and the equivalence table. */
	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(buf, size, &patch_size);
		if (ret < 0) {
			/*
			 * Patch verification failed, skip to the next container, if
			 * there is one. Before exit, check whether that container has
			 * found a patch already. If so, use it.
			 */
			goto out;
		} else if (ret > 0) {
			/* Patch is for a different family; skip it but keep scanning. */
			goto skip;
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
		if (mc_patch_matches(mc, eq_id)) {
			/* Later matches overwrite earlier ones. */
			desc->psize = patch_size;
			desc->mc = mc;
		}

skip:
		/* Skip patch section header too: */
		buf  += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}

out:
	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

	return orig_size - size;
}
575 
576 /*
577  * Scan the ucode blob for the proper container as we can have multiple
578  * containers glued together.
579  */
/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together. Stops when a container with a matching
 * patch is found (parse_container() returns 0) or the blob is exhausted.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	size_t consumed;

	while (size) {
		consumed = parse_container(ucode, size, desc);
		if (!consumed)
			return;

		/* catch wraparound */
		if (consumed > size)
			return;

		ucode += consumed;
		size  -= consumed;
	}
}
596 
__apply_microcode_amd(struct microcode_amd * mc,u32 * cur_rev,unsigned int psize)597 static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
598 				  unsigned int psize)
599 {
600 	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
601 
602 	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
603 		return -1;
604 
605 	native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
606 
607 	if (x86_family(bsp_cpuid_1_eax) == 0x17) {
608 		unsigned long p_addr_end = p_addr + psize - 1;
609 
610 		invlpg(p_addr);
611 
612 		/*
613 		 * Flush next page too if patch image is crossing a page
614 		 * boundary.
615 		 */
616 		if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
617 			invlpg(p_addr_end);
618 	}
619 
620 	/* verify patch application was successful */
621 	*cur_rev = get_patch_level();
622 	if (*cur_rev != mc->hdr.patch_id)
623 		return false;
624 
625 	return true;
626 }
627 
get_builtin_microcode(struct cpio_data * cp)628 static bool get_builtin_microcode(struct cpio_data *cp)
629 {
630 	char fw_name[36] = "amd-ucode/microcode_amd.bin";
631 	u8 family = x86_family(bsp_cpuid_1_eax);
632 	struct firmware fw;
633 
634 	if (IS_ENABLED(CONFIG_X86_32))
635 		return false;
636 
637 	if (family >= 0x15)
638 		snprintf(fw_name, sizeof(fw_name),
639 			 "amd-ucode/microcode_amd_fam%02hhxh.bin", family);
640 
641 	if (firmware_request_builtin(&fw, fw_name)) {
642 		cp->size = fw.size;
643 		cp->data = (void *)fw.data;
644 		return true;
645 	}
646 
647 	return false;
648 }
649 
find_blobs_in_containers(struct cpio_data * ret)650 static bool __init find_blobs_in_containers(struct cpio_data *ret)
651 {
652 	struct cpio_data cp;
653 	bool found;
654 
655 	if (!get_builtin_microcode(&cp))
656 		cp = find_microcode_in_initrd(ucode_path);
657 
658 	found = cp.data && cp.size;
659 	if (found)
660 		*ret = cp;
661 
662 	return found;
663 }
664 
665 /*
666  * Early load occurs before we can vmalloc(). So we look for the microcode
667  * patch container file in initrd, traverse equivalent cpu table, look for a
668  * matching microcode patch, and update, all in initrd memory in place.
669  * When vmalloc() is available for use later -- on 64-bit during first AP load,
670  * and on 32-bit during save_microcode_in_initrd() -- we can call
671  * load_microcode_amd() to save equivalent cpu table and microcode patches in
672  * kernel heap memory.
673  */
/*
 * Early (pre-vmalloc) microcode load on the BSP: find the blob, pick the
 * matching container/patch, and apply it in place. Also records the old
 * and (if updated) new revisions in @ed and caches CPUID(1).EAX for the
 * rest of the driver.
 */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { };
	struct microcode_amd *mc;
	struct cpio_data cp = { };
	char buf[4];
	u32 rev;

	/* Opt-out knob for the SHA256 blob check; taints the kernel. */
	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
		if (!strncmp(buf, "off", 3)) {
			sha_check = false;
			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		}
	}

	bsp_cpuid_1_eax = cpuid_1_eax;

	rev = get_patch_level();
	ed->old_rev = rev;

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

	if (!find_blobs_in_containers(&cp))
		return;

	scan_containers(cp.data, cp.size, &desc);

	mc = desc.mc;
	if (!mc)
		return;

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (ed->old_rev > mc->hdr.patch_id)
		return;

	if (__apply_microcode_amd(mc, &rev, desc.psize))
		ed->new_rev = rev;
}
718 
/*
 * Do patches @p and @n target the same CPU? On Zen and newer the f/m/s
 * is hardcoded in the patch ID (optionally ignoring the stepping);
 * before that the equivalence table ID decides.
 */
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
					 struct ucode_patch *n,
					 bool ignore_stepping)
{
	union cpuid_1_eax p_cid, n_cid;

	if (x86_family(bsp_cpuid_1_eax) < 0x17)
		return p->equiv_cpu == n->equiv_cpu;

	/* Zen and newer hardcode the f/m/s in the patch ID */
	p_cid = ucode_rev_to_cpuid(p->patch_id);
	n_cid = ucode_rev_to_cpuid(n->patch_id);

	if (ignore_stepping) {
		p_cid.stepping = 0;
		n_cid.stepping = 0;
	}

	return p_cid.full == n_cid.full;
}
738 
739 /*
740  * a small, trivial cache of per-family ucode patches
741  */
/*
 * a small, trivial cache of per-family ucode patches
 *
 * Find the cached patch equivalent to @uci's CPU (keyed by @equiv_cpu
 * pre-Zen, by the current revision's f/m/s on Zen+), or NULL.
 */
static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu)
{
	struct ucode_patch needle = {
		.equiv_cpu = equiv_cpu,
		.patch_id  = uci->cpu_sig.rev,
	};
	struct ucode_patch *p;

	WARN_ON_ONCE(!needle.patch_id);

	list_for_each_entry(p, &microcode_cache, plist) {
		if (patch_cpus_equivalent(p, &needle, false))
			return p;
	}

	return NULL;
}
758 
patch_newer(struct ucode_patch * p,struct ucode_patch * n)759 static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
760 {
761 	/* Zen and newer hardcode the f/m/s in the patch ID */
762         if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
763 		union zen_patch_rev zp, zn;
764 
765 		zp.ucode_rev = p->patch_id;
766 		zn.ucode_rev = n->patch_id;
767 
768 		if (zn.stepping != zp.stepping)
769 			return -1;
770 
771 		return zn.rev > zp.rev;
772 	} else {
773 		return n->patch_id > p->patch_id;
774 	}
775 }
776 
/*
 * Insert @new_patch into the cache, replacing an equivalent older patch
 * if present. Takes ownership of @new_patch (and frees it when the cache
 * already holds a newer-or-equal one).
 */
static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;
	int ret;

	list_for_each_entry(p, &microcode_cache, plist) {
		/* Compare ignoring the stepping so same-CPU patches collide. */
		if (patch_cpus_equivalent(p, new_patch, true)) {
			ret = patch_newer(p, new_patch);
			/* -1: different stepping on Zen+, keep looking. */
			if (ret < 0)
				continue;
			else if (!ret) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			/* Newer patch: swap it into the list and free the old one. */
			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}
803 
/* Free every cached patch together with its payload. */
static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		/* Unlink without list_del()'s pointer poisoning. */
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}
814 
/*
 * Find the cached patch for @cpu, refreshing the CPU's current revision
 * first (cache_find_patch() keys on it for Zen+). Returns NULL when no
 * equivalence entry (pre-Zen) or no cached patch exists.
 */
static struct ucode_patch *find_patch(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u16 equiv_id = 0;

	uci->cpu_sig.rev = get_patch_level();

	/* Pre-Zen needs a successful equivalence table lookup. */
	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
		if (!equiv_id)
			return NULL;
	}

	return cache_find_patch(uci, equiv_id);
}
830 
reload_ucode_amd(unsigned int cpu)831 void reload_ucode_amd(unsigned int cpu)
832 {
833 	u32 rev, dummy __always_unused;
834 	struct microcode_amd *mc;
835 	struct ucode_patch *p;
836 
837 	p = find_patch(cpu);
838 	if (!p)
839 		return;
840 
841 	mc = p->data;
842 
843 	rev = get_patch_level();
844 	if (rev < mc->hdr.patch_id) {
845 		if (__apply_microcode_amd(mc, &rev, p->size))
846 			pr_info_once("reload revision: 0x%08x\n", rev);
847 	}
848 }
849 
collect_cpu_info_amd(int cpu,struct cpu_signature * csig)850 static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
851 {
852 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
853 	struct ucode_patch *p;
854 
855 	csig->sig = cpuid_eax(0x00000001);
856 	csig->rev = get_patch_level();
857 
858 	/*
859 	 * a patch could have been loaded early, set uci->mc so that
860 	 * mc_bp_resume() can call apply_microcode()
861 	 */
862 	p = find_patch(cpu);
863 	if (p && (p->patch_id == csig->rev))
864 		uci->mc = p->data;
865 
866 	return 0;
867 }
868 
/*
 * Apply the cached patch to @cpu (must be the CPU executing this code).
 * Updates the per-CPU and, on the BSP, the global boot_cpu_data revision
 * bookkeeping. Returns UCODE_NFOUND/OK/UPDATED/ERROR accordingly.
 */
static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	/* find_patch() also refreshes uci->cpu_sig.rev. */
	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	rev = uci->cpu_sig.rev;

	mc_amd  = p->data;
	uci->mc = p->data;

	/* need to apply patch? */
	if (rev > mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode	 = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}
916 
/* Early AP load: record the AP's signature and apply the cached patch. */
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	unsigned int cpu = smp_processor_id();

	ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
	apply_microcode_amd(cpu);
}
924 
/*
 * Copy the container's equivalence table at @buf into the file-global
 * equiv_table (pre-Zen only; Zen+ skips the copy).
 *
 * Returns the number of bytes to skip past the container header and the
 * table, or 0 on verification/allocation failure.
 */
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		goto out;

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

out:
	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}
953 
free_equiv_cpu_table(void)954 static void free_equiv_cpu_table(void)
955 {
956 	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
957 		return;
958 
959 	vfree(equiv_table.entry);
960 	memset(&equiv_table, 0, sizeof(equiv_table));
961 }
962 
/* Tear down all driver state: equivalence table and patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
968 
969 /*
970  * Return a non-negative value even if some of the checks failed so that
971  * we can skip over the next patch. If we return a negative value, we
972  * signal a grave error like a memory allocation has failed and the
973  * driver cannot continue functioning normally. In such cases, we tear
974  * down everything we've used up so far and exit.
975  */
/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	/* Non-zero: bad patch (negative) or wrong family (positive). */
	ret = verify_patch(fw, leftover, patch_size);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	/* Copy the payload only; the section header is not cached. */
	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}
	patch->size = *patch_size;

	mc_hdr      = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id     = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return 0;
}
1017 
1018 /* Scan the blob in @data and add microcode patches to the cache. */
/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	/* Installs equiv_table and returns the bytes it occupies. */
	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw   += offset;
	size -= offset;

	/* The first section after the table must be a microcode patch. */
	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		/* Positive return (wrong family) still advances past the patch. */
		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		fw   +=  crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}
1051 
/*
 * Replace the driver's cached state with the contents of blob @data,
 * tearing everything down again on failure.
 */
static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		cleanup();

	return ret;
}
1065 
/*
 * Load blob @data into the cache and report UCODE_NEW when at least one
 * node has a CPU whose running revision is older than a cached patch
 * (checked on the first CPU of each node; all CPUs of a node are equal).
 */
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	struct cpuinfo_x86 *c;
	unsigned int nid, cpu;
	struct ucode_patch *p;
	enum ucode_state ret;

	ret = _load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		return ret;

	for_each_node_with_cpus(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));
		c = &cpu_data(cpu);

		p = find_patch(cpu);
		if (!p)
			continue;

		/* Already at or past the cached revision: nothing new here. */
		if (c->microcode >= p->patch_id)
			continue;

		ret = UCODE_NEW;
	}

	return ret;
}
1093 
/*
 * Early initcall: before the initrd is jettisoned, copy the matching
 * container's equivalence table and patches into kernel heap memory so
 * later (AP/resume) loads do not depend on initrd contents.
 */
static int __init save_microcode_in_initrd(void)
{
	unsigned int cpuid_1_eax = native_cpuid_eax(1);
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	enum ucode_state ret;
	struct cpio_data cp;

	/* Nothing to save when loading is disabled or the CPU is unsupported. */
	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	if (!find_blobs_in_containers(&cp))
		return -EINVAL;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}
1118 early_initcall(save_microcode_in_initrd);
1119 
1120 /*
1121  * AMD microcode firmware naming convention, up to family 15h they are in
1122  * the legacy file:
1123  *
1124  *    amd-ucode/microcode_amd.bin
1125  *
1126  * This legacy file is always smaller than 2K in size.
1127  *
1128  * Beginning with family 15h, they are in family-specific firmware files:
1129  *
1130  *    amd-ucode/microcode_amd_fam15h.bin
1131  *    amd-ucode/microcode_amd_fam16h.bin
1132  *    ...
1133  *
1134  * These might be larger than 2K.
1135  */
/*
 * Late-load entry point: request the (per-family, for 0x15+) firmware
 * file via @device, verify the container and populate the patch cache.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* Late loading with minrev enforcement is not supported on AMD. */
	if (force_minrev)
		return UCODE_NFOUND;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (!verify_container(fw->data, fw->size))
		goto fw_release;

	ret = load_microcode_amd(c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}
1166 
/* Drop the per-CPU pointer to the cached patch (cache itself persists). */
static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}
1173 
/* Driver callbacks registered with the generic microcode core. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_fw	= request_microcode_amd,
	.collect_cpu_info	= collect_cpu_info_amd,
	.apply_microcode	= apply_microcode_amd,
	.microcode_fini_cpu	= microcode_fini_cpu_amd,
	.nmi_safe		= true,
};
1181 
/* Return the AMD ops table, or NULL on unsupported vendor/family. */
struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}
	return &microcode_amd_ops;
}
1192 
/* Module teardown: release the equivalence table and patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}
1197