/*
 *  AMD CPU Microcode Update Driver for Linux
 *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *
 *  Author: Peter Oruba <peter.oruba@amd.com>
 *
 *  Based on work by:
 *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *
 *  Maintainers:
 *  Andreas Herrmann <herrmann.der.user@googlemail.com>
 *  Borislav Petkov <bp@alien8.de>
 *
 *  This driver allows upgrading microcode on F10h AMD
 *  CPUs and later.
 *
 *  Licensed under the terms of the GNU General Public
 *  License version 2. See file COPYING for details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/firmware.h>
#include <linux/pci_ids.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/microcode_amd.h>

MODULE_DESCRIPTION("AMD Microcode Update Driver");
MODULE_AUTHOR("Peter Oruba");
MODULE_LICENSE("GPL v2");

static struct equiv_cpu_entry *equiv_cpu_table;

struct ucode_patch {
	struct list_head plist;
	void *data;
	u32 patch_id;
	u16 equiv_cpu;
};

static LIST_HEAD(pcache);

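/* Map this CPU's CPUID(1) signature to an equivalence table ID; 0 means no match. */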
static u16 __find_equiv_id(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
}

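/*
 * Return the installed_cpu signature belonging to an equivalence ID, from
 * which the caller derives the CPU family. Returns 0 if the ID is unknown.
 */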
static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
{
	int i = 0;

	BUG_ON(!equiv_cpu_table);

	while (equiv_cpu_table[i].equiv_cpu != 0) {
		if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
			return equiv_cpu_table[i].installed_cpu;
		i++;
	}
	return 0;
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &pcache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;
	return NULL;
}

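/*
 * Add @new_patch to the cache. If a patch for the same equivalence ID is
 * already cached, keep only the more recent of the two and free the other.
 */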
static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &pcache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &pcache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &pcache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

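/* Return the cached patch matching this CPU's equivalence ID, or NULL. */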
static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

	equiv_id = __find_equiv_id(cpu);
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

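/* Record @cpu's CPUID(1) signature and currently running patch level in @csig. */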
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}

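/*
 * Check that @patch_size does not exceed the family-specific maximum nor the
 * bytes remaining in the container (@size). Returns @patch_size if it is
 * plausible, 0 otherwise.
 */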
static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}

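/*
 * Point the patch loader MSR at the patch data and check that the reported
 * patch level now matches the patch's ID. Returns 0 on success, -1 otherwise.
 */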
int __apply_microcode_amd(struct microcode_amd *mc_amd)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc_amd->hdr.patch_id)
		return -1;

	return 0;
}

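/*
 * Apply the cached patch for @cpu if it is newer than the patch level the CPU
 * is currently running. Must be called on @cpu itself.
 */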
int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev, dummy;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}

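/*
 * Copy the equivalence table out of the container file. Returns the number of
 * bytes consumed (table size plus container header), or a negative errno.
 */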
static int install_equiv_cpu_table(const u8 *buf)
{
	unsigned int *ibuf = (unsigned int *)buf;
	unsigned int type = ibuf[1];
	unsigned int size = ibuf[2];

	if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
		pr_err("empty section/invalid type field in container file section header\n");
		return -EINVAL;
	}

	equiv_cpu_table = vmalloc(size);
	if (!equiv_cpu_table) {
		pr_err("failed to allocate equivalent CPU table\n");
		return -ENOMEM;
	}

	memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);

	/* add header length */
	return size + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	patch_size  = *(u32 *)(fw + 4);
	crnt_size   = patch_size + SECTION_HDR_SIZE;
	mc_hdr	    = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	    = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* check if patch is for the current family */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -ENOMEM;
	}

	patch->data = kzalloc(patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -ENOMEM;
	}

	/* All looks ok, copy patch... */
	memcpy(patch->data, fw + SECTION_HDR_SIZE, patch_size);
	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}

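/*
 * Parse a container image: install its equivalence table, then verify and
 * cache every patch section that matches @family.
 */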
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	fw += offset;
	leftover = size - offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	while (leftover) {
		crnt_size = verify_and_add_patch(family, fw, leftover);
		if (crnt_size < 0)
			return ret;

		fw	 += crnt_size;
		leftover -= crnt_size;
	}

	return UCODE_OK;
}

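/*
 * Replace the current equivalence table and patch cache with the contents of
 * a new container image. On 32-bit kernels, also stash the BSP's matching
 * patch for the early loader.
 */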
enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
	/* save BSP's matching patch for early load */
	if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(smp_processor_id());
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}

/*
 * AMD microcode firmware naming convention: for families prior to 15h,
 * microcode is shipped in the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	ret = load_microcode_amd(c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}

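/* Loading microcode from a user-space buffer is not supported by this driver. */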
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};

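/* Return the AMD microcode_ops if running on a supported (family 10h+) AMD CPU. */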
struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}