xref: /linux/arch/x86/kernel/cpu/microcode/core.c (revision 9cfc5c90ad38c8fc11bfd39de42a107da00871ba)
1 /*
2  * CPU Microcode Update Driver for Linux
3  *
4  * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5  *	      2006	Shaohua Li <shaohua.li@intel.com>
6  *	      2013-2015	Borislav Petkov <bp@alien8.de>
7  *
8  * X86 CPU microcode early update for Linux:
9  *
10  *	Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
11  *			   H Peter Anvin" <hpa@zytor.com>
12  *		  (C) 2015 Borislav Petkov <bp@alien8.de>
13  *
14  * This driver allows to upgrade microcode on x86 processors.
15  *
16  * This program is free software; you can redistribute it and/or
17  * modify it under the terms of the GNU General Public License
18  * as published by the Free Software Foundation; either version
19  * 2 of the License, or (at your option) any later version.
20  */
21 
22 #define pr_fmt(fmt) "microcode: " fmt
23 
24 #include <linux/platform_device.h>
25 #include <linux/syscore_ops.h>
26 #include <linux/miscdevice.h>
27 #include <linux/capability.h>
28 #include <linux/firmware.h>
29 #include <linux/kernel.h>
30 #include <linux/mutex.h>
31 #include <linux/cpu.h>
32 #include <linux/fs.h>
33 #include <linux/mm.h>
34 
35 #include <asm/microcode_intel.h>
36 #include <asm/cpu_device_id.h>
37 #include <asm/microcode_amd.h>
38 #include <asm/perf_event.h>
39 #include <asm/microcode.h>
40 #include <asm/processor.h>
41 #include <asm/cmdline.h>
42 
43 #define MICROCODE_VERSION	"2.01"
44 
45 static struct microcode_ops	*microcode_ops;
46 
47 static bool dis_ucode_ldr;
48 
49 static int __init disable_loader(char *str)
50 {
51 	dis_ucode_ldr = true;
52 	return 1;
53 }
54 __setup("dis_ucode_ldr", disable_loader);
55 
56 /*
57  * Synchronization.
58  *
59  * All non cpu-hotplug-callback call sites use:
60  *
61  * - microcode_mutex to synchronize with each other;
62  * - get/put_online_cpus() to synchronize with
63  *   the cpu-hotplug-callback call sites.
64  *
65  * We guarantee that only a single cpu is being
66  * updated at any particular moment of time.
67  */
68 static DEFINE_MUTEX(microcode_mutex);
69 
70 struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
71 EXPORT_SYMBOL_GPL(ucode_cpu_info);
72 
73 /*
74  * Operations that are run on a target cpu:
75  */
76 
77 struct cpu_info_ctx {
78 	struct cpu_signature	*cpu_sig;
79 	int			err;
80 };
81 
82 static bool __init check_loader_disabled_bsp(void)
83 {
84 #ifdef CONFIG_X86_32
85 	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
86 	const char *opt	    = "dis_ucode_ldr";
87 	const char *option  = (const char *)__pa_nodebug(opt);
88 	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
89 
90 #else /* CONFIG_X86_64 */
91 	const char *cmdline = boot_command_line;
92 	const char *option  = "dis_ucode_ldr";
93 	bool *res = &dis_ucode_ldr;
94 #endif
95 
96 	if (cmdline_find_option_bool(cmdline, option))
97 		*res = true;
98 
99 	return *res;
100 }
101 
102 extern struct builtin_fw __start_builtin_fw[];
103 extern struct builtin_fw __end_builtin_fw[];
104 
105 bool get_builtin_firmware(struct cpio_data *cd, const char *name)
106 {
107 #ifdef CONFIG_FW_LOADER
108 	struct builtin_fw *b_fw;
109 
110 	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
111 		if (!strcmp(name, b_fw->name)) {
112 			cd->size = b_fw->size;
113 			cd->data = b_fw->data;
114 			return true;
115 		}
116 	}
117 #endif
118 	return false;
119 }
120 
121 void __init load_ucode_bsp(void)
122 {
123 	int vendor;
124 	unsigned int family;
125 
126 	if (check_loader_disabled_bsp())
127 		return;
128 
129 	if (!have_cpuid_p())
130 		return;
131 
132 	vendor = x86_vendor();
133 	family = x86_family();
134 
135 	switch (vendor) {
136 	case X86_VENDOR_INTEL:
137 		if (family >= 6)
138 			load_ucode_intel_bsp();
139 		break;
140 	case X86_VENDOR_AMD:
141 		if (family >= 0x10)
142 			load_ucode_amd_bsp(family);
143 		break;
144 	default:
145 		break;
146 	}
147 }
148 
149 static bool check_loader_disabled_ap(void)
150 {
151 #ifdef CONFIG_X86_32
152 	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
153 #else
154 	return dis_ucode_ldr;
155 #endif
156 }
157 
158 void load_ucode_ap(void)
159 {
160 	int vendor, family;
161 
162 	if (check_loader_disabled_ap())
163 		return;
164 
165 	if (!have_cpuid_p())
166 		return;
167 
168 	vendor = x86_vendor();
169 	family = x86_family();
170 
171 	switch (vendor) {
172 	case X86_VENDOR_INTEL:
173 		if (family >= 6)
174 			load_ucode_intel_ap();
175 		break;
176 	case X86_VENDOR_AMD:
177 		if (family >= 0x10)
178 			load_ucode_amd_ap();
179 		break;
180 	default:
181 		break;
182 	}
183 }
184 
185 int __init save_microcode_in_initrd(void)
186 {
187 	struct cpuinfo_x86 *c = &boot_cpu_data;
188 
189 	switch (c->x86_vendor) {
190 	case X86_VENDOR_INTEL:
191 		if (c->x86 >= 6)
192 			save_microcode_in_initrd_intel();
193 		break;
194 	case X86_VENDOR_AMD:
195 		if (c->x86 >= 0x10)
196 			save_microcode_in_initrd_amd();
197 		break;
198 	default:
199 		break;
200 	}
201 
202 	return 0;
203 }
204 
205 void reload_early_microcode(void)
206 {
207 	int vendor, family;
208 
209 	vendor = x86_vendor();
210 	family = x86_family();
211 
212 	switch (vendor) {
213 	case X86_VENDOR_INTEL:
214 		if (family >= 6)
215 			reload_ucode_intel();
216 		break;
217 	case X86_VENDOR_AMD:
218 		if (family >= 0x10)
219 			reload_ucode_amd();
220 		break;
221 	default:
222 		break;
223 	}
224 }
225 
226 static void collect_cpu_info_local(void *arg)
227 {
228 	struct cpu_info_ctx *ctx = arg;
229 
230 	ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
231 						   ctx->cpu_sig);
232 }
233 
234 static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
235 {
236 	struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
237 	int ret;
238 
239 	ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
240 	if (!ret)
241 		ret = ctx.err;
242 
243 	return ret;
244 }
245 
246 static int collect_cpu_info(int cpu)
247 {
248 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
249 	int ret;
250 
251 	memset(uci, 0, sizeof(*uci));
252 
253 	ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
254 	if (!ret)
255 		uci->valid = 1;
256 
257 	return ret;
258 }
259 
260 struct apply_microcode_ctx {
261 	int err;
262 };
263 
264 static void apply_microcode_local(void *arg)
265 {
266 	struct apply_microcode_ctx *ctx = arg;
267 
268 	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
269 }
270 
271 static int apply_microcode_on_target(int cpu)
272 {
273 	struct apply_microcode_ctx ctx = { .err = 0 };
274 	int ret;
275 
276 	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
277 	if (!ret)
278 		ret = ctx.err;
279 
280 	return ret;
281 }
282 
283 #ifdef CONFIG_MICROCODE_OLD_INTERFACE
284 static int do_microcode_update(const void __user *buf, size_t size)
285 {
286 	int error = 0;
287 	int cpu;
288 
289 	for_each_online_cpu(cpu) {
290 		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
291 		enum ucode_state ustate;
292 
293 		if (!uci->valid)
294 			continue;
295 
296 		ustate = microcode_ops->request_microcode_user(cpu, buf, size);
297 		if (ustate == UCODE_ERROR) {
298 			error = -1;
299 			break;
300 		} else if (ustate == UCODE_OK)
301 			apply_microcode_on_target(cpu);
302 	}
303 
304 	return error;
305 }
306 
307 static int microcode_open(struct inode *inode, struct file *file)
308 {
309 	return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
310 }
311 
312 static ssize_t microcode_write(struct file *file, const char __user *buf,
313 			       size_t len, loff_t *ppos)
314 {
315 	ssize_t ret = -EINVAL;
316 
317 	if ((len >> PAGE_SHIFT) > totalram_pages) {
318 		pr_err("too much data (max %ld pages)\n", totalram_pages);
319 		return ret;
320 	}
321 
322 	get_online_cpus();
323 	mutex_lock(&microcode_mutex);
324 
325 	if (do_microcode_update(buf, len) == 0)
326 		ret = (ssize_t)len;
327 
328 	if (ret > 0)
329 		perf_check_microcode();
330 
331 	mutex_unlock(&microcode_mutex);
332 	put_online_cpus();
333 
334 	return ret;
335 }
336 
337 static const struct file_operations microcode_fops = {
338 	.owner			= THIS_MODULE,
339 	.write			= microcode_write,
340 	.open			= microcode_open,
341 	.llseek		= no_llseek,
342 };
343 
344 static struct miscdevice microcode_dev = {
345 	.minor			= MICROCODE_MINOR,
346 	.name			= "microcode",
347 	.nodename		= "cpu/microcode",
348 	.fops			= &microcode_fops,
349 };
350 
351 static int __init microcode_dev_init(void)
352 {
353 	int error;
354 
355 	error = misc_register(&microcode_dev);
356 	if (error) {
357 		pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
358 		return error;
359 	}
360 
361 	return 0;
362 }
363 
364 static void __exit microcode_dev_exit(void)
365 {
366 	misc_deregister(&microcode_dev);
367 }
368 #else
369 #define microcode_dev_init()	0
370 #define microcode_dev_exit()	do { } while (0)
371 #endif
372 
373 /* fake device for request_firmware */
374 static struct platform_device	*microcode_pdev;
375 
376 static int reload_for_cpu(int cpu)
377 {
378 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
379 	enum ucode_state ustate;
380 	int err = 0;
381 
382 	if (!uci->valid)
383 		return err;
384 
385 	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
386 	if (ustate == UCODE_OK)
387 		apply_microcode_on_target(cpu);
388 	else
389 		if (ustate == UCODE_ERROR)
390 			err = -EINVAL;
391 	return err;
392 }
393 
394 static ssize_t reload_store(struct device *dev,
395 			    struct device_attribute *attr,
396 			    const char *buf, size_t size)
397 {
398 	unsigned long val;
399 	int cpu;
400 	ssize_t ret = 0, tmp_ret;
401 
402 	ret = kstrtoul(buf, 0, &val);
403 	if (ret)
404 		return ret;
405 
406 	if (val != 1)
407 		return size;
408 
409 	get_online_cpus();
410 	mutex_lock(&microcode_mutex);
411 	for_each_online_cpu(cpu) {
412 		tmp_ret = reload_for_cpu(cpu);
413 		if (tmp_ret != 0)
414 			pr_warn("Error reloading microcode on CPU %d\n", cpu);
415 
416 		/* save retval of the first encountered reload error */
417 		if (!ret)
418 			ret = tmp_ret;
419 	}
420 	if (!ret)
421 		perf_check_microcode();
422 	mutex_unlock(&microcode_mutex);
423 	put_online_cpus();
424 
425 	if (!ret)
426 		ret = size;
427 
428 	return ret;
429 }
430 
431 static ssize_t version_show(struct device *dev,
432 			struct device_attribute *attr, char *buf)
433 {
434 	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
435 
436 	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
437 }
438 
439 static ssize_t pf_show(struct device *dev,
440 			struct device_attribute *attr, char *buf)
441 {
442 	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
443 
444 	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
445 }
446 
447 static DEVICE_ATTR(reload, 0200, NULL, reload_store);
448 static DEVICE_ATTR(version, 0400, version_show, NULL);
449 static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
450 
451 static struct attribute *mc_default_attrs[] = {
452 	&dev_attr_version.attr,
453 	&dev_attr_processor_flags.attr,
454 	NULL
455 };
456 
457 static struct attribute_group mc_attr_group = {
458 	.attrs			= mc_default_attrs,
459 	.name			= "microcode",
460 };
461 
462 static void microcode_fini_cpu(int cpu)
463 {
464 	microcode_ops->microcode_fini_cpu(cpu);
465 }
466 
467 static enum ucode_state microcode_resume_cpu(int cpu)
468 {
469 	pr_debug("CPU%d updated upon resume\n", cpu);
470 
471 	if (apply_microcode_on_target(cpu))
472 		return UCODE_ERROR;
473 
474 	return UCODE_OK;
475 }
476 
477 static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
478 {
479 	enum ucode_state ustate;
480 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
481 
482 	if (uci && uci->valid)
483 		return UCODE_OK;
484 
485 	if (collect_cpu_info(cpu))
486 		return UCODE_ERROR;
487 
488 	/* --dimm. Trigger a delayed update? */
489 	if (system_state != SYSTEM_RUNNING)
490 		return UCODE_NFOUND;
491 
492 	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
493 						     refresh_fw);
494 
495 	if (ustate == UCODE_OK) {
496 		pr_debug("CPU%d updated upon init\n", cpu);
497 		apply_microcode_on_target(cpu);
498 	}
499 
500 	return ustate;
501 }
502 
503 static enum ucode_state microcode_update_cpu(int cpu)
504 {
505 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
506 
507 	if (uci->valid)
508 		return microcode_resume_cpu(cpu);
509 
510 	return microcode_init_cpu(cpu, false);
511 }
512 
513 static int mc_device_add(struct device *dev, struct subsys_interface *sif)
514 {
515 	int err, cpu = dev->id;
516 
517 	if (!cpu_online(cpu))
518 		return 0;
519 
520 	pr_debug("CPU%d added\n", cpu);
521 
522 	err = sysfs_create_group(&dev->kobj, &mc_attr_group);
523 	if (err)
524 		return err;
525 
526 	if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
527 		return -EINVAL;
528 
529 	return err;
530 }
531 
532 static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
533 {
534 	int cpu = dev->id;
535 
536 	if (!cpu_online(cpu))
537 		return;
538 
539 	pr_debug("CPU%d removed\n", cpu);
540 	microcode_fini_cpu(cpu);
541 	sysfs_remove_group(&dev->kobj, &mc_attr_group);
542 }
543 
544 static struct subsys_interface mc_cpu_interface = {
545 	.name			= "microcode",
546 	.subsys			= &cpu_subsys,
547 	.add_dev		= mc_device_add,
548 	.remove_dev		= mc_device_remove,
549 };
550 
551 /**
552  * mc_bp_resume - Update boot CPU microcode during resume.
553  */
554 static void mc_bp_resume(void)
555 {
556 	int cpu = smp_processor_id();
557 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
558 
559 	if (uci->valid && uci->mc)
560 		microcode_ops->apply_microcode(cpu);
561 	else if (!uci->mc)
562 		reload_early_microcode();
563 }
564 
565 static struct syscore_ops mc_syscore_ops = {
566 	.resume			= mc_bp_resume,
567 };
568 
569 static int
570 mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
571 {
572 	unsigned int cpu = (unsigned long)hcpu;
573 	struct device *dev;
574 
575 	dev = get_cpu_device(cpu);
576 
577 	switch (action & ~CPU_TASKS_FROZEN) {
578 	case CPU_ONLINE:
579 		microcode_update_cpu(cpu);
580 		pr_debug("CPU%d added\n", cpu);
581 		/*
582 		 * "break" is missing on purpose here because we want to fall
583 		 * through in order to create the sysfs group.
584 		 */
585 
586 	case CPU_DOWN_FAILED:
587 		if (sysfs_create_group(&dev->kobj, &mc_attr_group))
588 			pr_err("Failed to create group for CPU%d\n", cpu);
589 		break;
590 
591 	case CPU_DOWN_PREPARE:
592 		/* Suspend is in progress, only remove the interface */
593 		sysfs_remove_group(&dev->kobj, &mc_attr_group);
594 		pr_debug("CPU%d removed\n", cpu);
595 		break;
596 
597 	/*
598 	 * case CPU_DEAD:
599 	 *
600 	 * When a CPU goes offline, don't free up or invalidate the copy of
601 	 * the microcode in kernel memory, so that we can reuse it when the
602 	 * CPU comes back online without unnecessarily requesting the userspace
603 	 * for it again.
604 	 */
605 	}
606 
607 	/* The CPU refused to come up during a system resume */
608 	if (action == CPU_UP_CANCELED_FROZEN)
609 		microcode_fini_cpu(cpu);
610 
611 	return NOTIFY_OK;
612 }
613 
614 static struct notifier_block mc_cpu_notifier = {
615 	.notifier_call	= mc_cpu_callback,
616 };
617 
618 static struct attribute *cpu_root_microcode_attrs[] = {
619 	&dev_attr_reload.attr,
620 	NULL
621 };
622 
623 static struct attribute_group cpu_root_microcode_group = {
624 	.name  = "microcode",
625 	.attrs = cpu_root_microcode_attrs,
626 };
627 
628 int __init microcode_init(void)
629 {
630 	struct cpuinfo_x86 *c = &boot_cpu_data;
631 	int error;
632 
633 	if (paravirt_enabled() || dis_ucode_ldr)
634 		return -EINVAL;
635 
636 	if (c->x86_vendor == X86_VENDOR_INTEL)
637 		microcode_ops = init_intel_microcode();
638 	else if (c->x86_vendor == X86_VENDOR_AMD)
639 		microcode_ops = init_amd_microcode();
640 	else
641 		pr_err("no support for this CPU vendor\n");
642 
643 	if (!microcode_ops)
644 		return -ENODEV;
645 
646 	microcode_pdev = platform_device_register_simple("microcode", -1,
647 							 NULL, 0);
648 	if (IS_ERR(microcode_pdev))
649 		return PTR_ERR(microcode_pdev);
650 
651 	get_online_cpus();
652 	mutex_lock(&microcode_mutex);
653 
654 	error = subsys_interface_register(&mc_cpu_interface);
655 	if (!error)
656 		perf_check_microcode();
657 	mutex_unlock(&microcode_mutex);
658 	put_online_cpus();
659 
660 	if (error)
661 		goto out_pdev;
662 
663 	error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
664 				   &cpu_root_microcode_group);
665 
666 	if (error) {
667 		pr_err("Error creating microcode group!\n");
668 		goto out_driver;
669 	}
670 
671 	error = microcode_dev_init();
672 	if (error)
673 		goto out_ucode_group;
674 
675 	register_syscore_ops(&mc_syscore_ops);
676 	register_hotcpu_notifier(&mc_cpu_notifier);
677 
678 	pr_info("Microcode Update Driver: v" MICROCODE_VERSION
679 		" <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
680 
681 	return 0;
682 
683  out_ucode_group:
684 	sysfs_remove_group(&cpu_subsys.dev_root->kobj,
685 			   &cpu_root_microcode_group);
686 
687  out_driver:
688 	get_online_cpus();
689 	mutex_lock(&microcode_mutex);
690 
691 	subsys_interface_unregister(&mc_cpu_interface);
692 
693 	mutex_unlock(&microcode_mutex);
694 	put_online_cpus();
695 
696  out_pdev:
697 	platform_device_unregister(microcode_pdev);
698 	return error;
699 
700 }
701