xref: /linux/arch/x86/kernel/cpu/microcode/amd.c (revision 308d3165d8b2b98d3dc3d97d6662062735daea67)
/*
 *  AMD CPU Microcode Update Driver for Linux
 *
 *  This driver allows upgrading microcode on F10h AMD
 *  CPUs and later.
 *
 *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	          2013-2016 Borislav Petkov <bp@alien8.de>
 *
 *  Author: Peter Oruba <peter.oruba@amd.com>
 *
 *  Based on work by:
 *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *
 *  early loader:
 *  Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 *  Author: Jacob Shin <jacob.shin@amd.com>
 *  Fixes: Borislav Petkov <bp@suse.de>
 *
 *  Licensed under the terms of the GNU General Public
 *  License version 2. See file COPYING for details.
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <asm/microcode_amd.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>

static struct equiv_cpu_entry *equiv_cpu_table;

/*
 * This describes the current valid container of microcode patches which we
 * will save from the initrd/builtin firmware before its contents are
 * jettisoned.
 */
struct container {
	u8 *data;
	size_t size;
} cont;
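
/*
 * Sketch of the container file layout as parsed below (constants from
 * asm/microcode_amd.h; CONTAINER_HDR_SZ is 12 bytes, SECTION_HDR_SIZE 8):
 *
 *	u32	UCODE_MAGIC (0x00414d44)
 *	u32	UCODE_EQUIV_CPU_TABLE_TYPE (0)
 *	u32	size of the equivalence table, in bytes
 *	struct	equiv_cpu_entry[], zero-terminated
 *
 * followed by one or more patch sections, each:
 *
 *	u32	UCODE_UCODE_TYPE (1)
 *	u32	patch size, in bytes
 *	struct	microcode_amd (header plus patch data)
 *
 * Several such containers can be glued together into one blob.
 */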

static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;

/*
 * The microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/x86/early-microcode.txt.
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
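
/*
 * For illustration, a container is typically glued onto an existing initrd
 * roughly like this (paths and names here are examples; the document
 * referenced above has the canonical recipe):
 *
 *	mkdir -p kernel/x86/microcode
 *	cp ucode.bin kernel/x86/microcode/AuthenticAMD.bin
 *	find . | cpio -o -H newc > ucode.cpio
 *	cat ucode.cpio /boot/initrd.img > /boot/initrd.ucode.img
 */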

static size_t compute_container_size(u8 *data, u32 total_size)
{
	size_t size = 0;
	u32 *header = (u32 *)data;

	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return size;

	size = header[2] + CONTAINER_HDR_SZ;
	total_size -= size;
	data += size;

	while (total_size) {
		u16 patch_size;

		header = (u32 *)data;

		if (header[0] != UCODE_UCODE_TYPE)
			break;

		/* Sanity-check patch size. */
		patch_size = header[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		size	   += patch_size + SECTION_HDR_SIZE;
		data	   += patch_size + SECTION_HDR_SIZE;
		total_size -= patch_size + SECTION_HDR_SIZE;
	}

	return size;
}
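
/*
 * Worked example with made-up numbers: a container with a 0x40-byte
 * equivalence table and two 0x800-byte patches yields
 *
 *	size = (0x40 + CONTAINER_HDR_SZ) + 2 * (0x800 + SECTION_HDR_SIZE)
 *	     = 0x4c + 0x1010 = 0x105c bytes.
 */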

static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
				unsigned int sig)
{
	int i = 0;

	if (!equiv_cpu_table)
		return 0;

	while (equiv_cpu_table[i].installed_cpu != 0) {
		if (sig == equiv_cpu_table[i].installed_cpu)
			return equiv_cpu_table[i].equiv_cpu;

		i++;
	}
	return 0;
}
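
/*
 * Each equiv_cpu_entry maps a CPUID fn 0x1 signature (installed_cpu) to the
 * 16-bit equivalence ID (equiv_cpu) which patches carry in their
 * processor_rev_id field. A hypothetical Fam15h entry might look like
 * { .installed_cpu = 0x00600f12, .equiv_cpu = 0x6012 }.
 */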

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */
static struct container
find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
{
	struct container ret = { NULL, 0 };
	u32 eax, ebx, ecx, edx;
	struct equiv_cpu_entry *eq;
	int offset, left;
	u16 eq_id = 0;
	u32 *header;
	u8 *data;

	data   = ucode;
	left   = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return ret;

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		ret.data = data;

		/* Advance past the container header */
		offset = header[2] + CONTAINER_HDR_SZ;
		data  += offset;
		left  -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			ret.size = compute_container_size(ret.data, left + offset);

			/*
			 * Truncate how much we need to iterate over in the
			 * ucode update loop below.
			 */
			left = ret.size - offset;
			*ret_id = eq_id;
			return ret;
		}

		/*
		 * Support multiple container files appended together. If this
		 * one does not have a matching equivalent CPU entry, we
		 * fast-forward to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;

			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data  += offset;
			left  -= offset;
		}

		/* mark where the next microcode container file starts */
		offset    = data - (u8 *)ucode;
		ucode     = data;
	}

	return ret;
}

static int __apply_microcode_amd(struct microcode_amd *mc_amd)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc_amd->hdr.patch_id)
		return -1;

	return 0;
}
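
/*
 * The apply protocol above, for reference: the address of the patch body
 * (from hdr.data_code onwards) is written to MSR_AMD64_PATCH_LOADER
 * (0xc0010020), and the resulting level is read back from
 * MSR_AMD64_PATCH_LEVEL (0x0000008b). The update took effect only if the
 * level read back equals the patch's patch_id.
 */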

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in the initrd, traverse the equivalent CPU table, look
 * for a matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save the equivalent CPU table and microcode patches
 * in kernel heap memory.
 */
static struct container
apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
{
	struct container ret = { NULL, 0 };
	u8 (*patch)[PATCH_MAX_SIZE];
	int offset, left;
	u32 rev, *header;
	u8  *data;
	u16 eq_id = 0;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	patch	= &amd_ucode_patch;
#endif

	if (check_current_patch_level(&rev, true))
		return (struct container){ NULL, 0 };

	ret = find_proper_container(ucode, size, &eq_id);
	if (!eq_id)
		return (struct container){ NULL, 0 };

	this_equiv_id = eq_id;
	header = (u32 *)ret.data;

	/* We're pointing to an equiv table, skip over it. */
	data = ret.data + header[2] + CONTAINER_HDR_SZ;
	left = ret.size - (header[2] + CONTAINER_HDR_SZ);

	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)                  /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				if (save_patch)
					memcpy(patch, mc, min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset  = header[1] + SECTION_HDR_SIZE;
		data   += offset;
		left   -= offset;
	}
	return ret;
}

static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
{
#ifdef CONFIG_X86_64
	char fw_name[36] = "amd-ucode/microcode_amd.bin";

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	return get_builtin_firmware(cp, fw_name);
#else
	return false;
#endif
}
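
/*
 * E.g., family 0x16 resolves to the builtin blob named
 * "amd-ucode/microcode_amd_fam16h.bin", while families below 0x15 use the
 * legacy "amd-ucode/microcode_amd.bin".
 */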

void __init load_ucode_amd_bsp(unsigned int family)
{
	struct ucode_cpu_info *uci;
	struct cpio_data cp;
	const char *path;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		uci	= (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
		path	= (const char *)__pa_nodebug(ucode_path);
		use_pa	= true;
	} else {
		uci     = ucode_cpu_info;
		path	= ucode_path;
		use_pa	= false;
	}

	if (!get_builtin_microcode(&cp, family))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return;

	/* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
	uci->cpu_sig.sig = cpuid_eax(1);

	apply_microcode_early_amd(cp.data, cp.size, true);
}

#ifdef CONFIG_X86_32
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse equiv_cpu_table and microcode_cache in kernel heap memory.
 * So during cold boot, an AP applies microcode from the initrd just like the
 * BSP. In save_microcode_in_initrd_amd() the BSP's patch is copied to
 * amd_ucode_patch, which is used upon resume from suspend.
 */
void load_ucode_amd_ap(unsigned int family)
{
	struct microcode_amd *mc;
	struct cpio_data cp;

	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	if (!get_builtin_microcode(&cp, family))
		cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);

	if (!(cp.data && cp.size))
		return;

	/*
	 * This will set amd_ucode_patch above so that the following APs can
	 * use it directly instead of going down this path again.
	 */
	apply_microcode_early_amd(cp.data, cp.size, true);
}
#else
void load_ucode_amd_ap(unsigned int family)
{
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u32 rev, eax;
	u16 eq_id;

	/* 64-bit runs with paging enabled, thus early==false. */
	if (check_current_patch_level(&rev, false))
		return;

	/* First AP hasn't cached it yet, go through the blob. */
	if (!cont.data) {
		struct cpio_data cp = { NULL, 0, "" };

		if (cont.size == -1)
			return;

reget:
		if (!get_builtin_microcode(&cp, family)) {
#ifdef CONFIG_BLK_DEV_INITRD
			cp = find_cpio_data(ucode_path, (void *)initrd_start,
					    initrd_end - initrd_start, NULL);
#endif
			if (!(cp.data && cp.size)) {
				/*
				 * Mark it so that other APs do not scan again
				 * for no real reason and slow down boot
				 * needlessly.
				 */
				cont.size = -1;
				return;
			}
		}

		cont = apply_microcode_early_amd(cp.data, cp.size, false);
		if (!(cont.data && cont.size)) {
			cont.size = -1;
			return;
		}
	}

	eax = cpuid_eax(0x00000001);
	eq  = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		goto reget;
	}
}
#endif /* CONFIG_X86_32 */

static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);

int __init save_microcode_in_initrd_amd(unsigned int fam)
{
	enum ucode_state ret;
	int retval = 0;
	u16 eq_id;

	if (!cont.data) {
		if (IS_ENABLED(CONFIG_X86_32) && (cont.size != -1)) {
			struct cpio_data cp = { NULL, 0, "" };

#ifdef CONFIG_BLK_DEV_INITRD
			cp = find_cpio_data(ucode_path, (void *)initrd_start,
					    initrd_end - initrd_start, NULL);
#endif

			if (!(cp.data && cp.size)) {
				cont.size = -1;
				return -EINVAL;
			}

			cont = find_proper_container(cp.data, cp.size, &eq_id);
			if (!eq_id) {
				cont.size = -1;
				return -EINVAL;
			}

		} else
			return -EINVAL;
	}

	ret = load_microcode_amd(smp_processor_id(), fam, cont.data, cont.size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * The initrd is going to be freed any moment now. So stash patches
	 * for the current family and switch to the patch cache for CPU
	 * hotplug etc. later.
	 */
	cont.data = NULL;
	cont.size = 0;

	return retval;
}

void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev;

	/*
	 * early==false because this is a syscore ->resume path and by
	 * that time paging has long been enabled.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	mc = (struct microcode_amd *)amd_ucode_patch;
	if (!mc)
		return;

	if (rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
		}
	}
}

static u16 __find_equiv_id(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
}

static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
{
	int i = 0;

	BUG_ON(!equiv_cpu_table);

	while (equiv_cpu_table[i].equiv_cpu != 0) {
		if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
			return equiv_cpu_table[i].installed_cpu;
		i++;
	}
	return 0;
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;
	return NULL;
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id)
				/* we already have the latest patch */
				return;

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

	equiv_id = __find_equiv_id(cpu);
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}

static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}
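
/*
 * E.g., on family 0x16 a section claiming a 4096-byte patch is rejected
 * because min_t(u32, size, F16H_MPB_MAX_SIZE) can never exceed 3458.
 */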

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};
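
/*
 * Going by the 0x010000xx patch ID range, the above look like family 10h
 * levels (an assumption based on AMD's patch ID numbering).
 */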

/*
 * Check the current patch level on this CPU.
 *
 * @rev: Use it to return the patch level. It is set to 0 when the current
 *	 level is one of the final, non-updatable ones above.
 * @early: true when still running early, before paging has been enabled on
 *	   32-bit, so that final_levels must be accessed through its physical
 *	   address.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
bool check_current_patch_level(u32 *rev, bool early)
{
	u32 lvl, dummy, i;
	bool ret = false;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	if (IS_ENABLED(CONFIG_X86_32) && early)
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i]) {
			lvl = 0;
			ret = true;
			break;
		}
	}

	if (rev)
		*rev = lvl;

	return ret;
}

static int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	if (check_current_patch_level(&rev, false))
		return -1;

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}

static int install_equiv_cpu_table(const u8 *buf)
{
	unsigned int *ibuf = (unsigned int *)buf;
	unsigned int type = ibuf[1];
	unsigned int size = ibuf[2];

	if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
		pr_err("empty section/invalid type field in container file section header\n");
		return -EINVAL;
	}

	equiv_cpu_table = vmalloc(size);
	if (!equiv_cpu_table) {
		pr_err("failed to allocate equivalent CPU table\n");
		return -ENOMEM;
	}

	memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);

	/* add header length */
	return size + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error such as a failed memory allocation, and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	patch_size  = *(u32 *)(fw + 4);
	crnt_size   = patch_size + SECTION_HDR_SIZE;
	mc_hdr	    = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	    = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

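	/*
	 * The CPUID fn 0x1 signature packs the family as base family
	 * (bits 11:8) plus extended family (bits 27:20); e.g., a
	 * signature of 0x00600f12 gives 0xf + 0x6 = family 0x15.
	 */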
	/* check if patch is for the current family */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}

static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	fw += offset;
	leftover = size - offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	while (leftover) {
		crnt_size = verify_and_add_patch(family, fw, leftover);
		if (crnt_size < 0)
			return ret;

		fw	 += crnt_size;
		leftover -= crnt_size;
	}

	return UCODE_OK;
}

static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}

/*
 * AMD microcode firmware naming convention: before family 15h, patches are
 * shipped in the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};

struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}