// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CPU Microcode Update Driver for Linux
 *
 *	Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *		      2006	Shaohua Li <shaohua.li@intel.com>
 *		      2013-2016	Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *			   H Peter Anvin <hpa@zytor.com>
 *		  (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows updating the microcode on x86 processors.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/msr.h>
#include <asm/setup.h>

#include "internal.h"

static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr = false;

bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single CPU is being
 * updated at any given time.
 */
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

struct early_load_data early_data;

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	if (x86_cpuid_vendor() != X86_VENDOR_AMD)
		return false;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}

bool __init microcode_loader_disabled(void)
{
	if (dis_ucode_ldr)
		return true;

	/*
	 * Disable when:
	 *
	 * 1) The CPU does not support CPUID.
	 *
	 * 2) Bit 31 in CPUID[1]:ECX is clear
	 *    The bit is reserved for hypervisor use. This is still not
	 *    completely accurate as XEN PV guests don't see that CPUID bit
	 *    set, but that's good enough as they don't land on the BSP
	 *    path anyway.
	 *
	 * 3) Certain AMD patch levels are not allowed to be
	 *    overwritten.
	 */
	if (!cpuid_feature() ||
	    native_cpuid_ecx(1) & BIT(31) ||
	    amd_check_current_patch_level())
		dis_ucode_ldr = true;

	return dis_ucode_ldr;
}

void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;
	bool intel = true;

	if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
		dis_ucode_ldr = true;

	if (microcode_loader_disabled())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) < 6)
			return;
		break;

	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) < 0x10)
			return;
		intel = false;
		break;

	default:
		return;
	}

	if (intel)
		load_ucode_intel_bsp(&early_data);
	else
		load_ucode_amd_bsp(&early_data, cpuid_1_eax);
}

void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	/*
	 * Can't use microcode_loader_disabled() here - .init section
	 * hell. It doesn't have to either - the BSP variant must've
	 * parsed cmdline already anyway.
	 */
	if (dis_ucode_ldr)
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_ap(cpuid_1_eax);
		break;
	default:
		break;
	}
}

struct cpio_data __init find_microcode_in_initrd(const char *path)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	size = boot_params.hdr.ramdisk_size;
	/* Early load on BSP has a temporary mapping. */
	if (size)
		start = initrd_start_early;

#else /* CONFIG_X86_64 */
	size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;
		start += PAGE_OFFSET;
	}
#endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 */
	if (initrd_start)
		start = initrd_start;

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}

static void reload_early_microcode(unsigned int cpu)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd(cpu);
		break;
	default:
		break;
	}
}

/* fake device for request_firmware */
static struct platform_device *microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
enum sibling_ctrl {
	/* Spinwait with timeout */
	SCTRL_WAIT,
	/* Invoke the microcode_apply() callback */
	SCTRL_APPLY,
	/* Proceed without invoking the microcode_apply() callback */
	SCTRL_DONE,
};

struct microcode_ctrl {
	enum sibling_ctrl	ctrl;
	enum ucode_state	result;
	unsigned int		ctrl_cpu;
	bool			nmi_enabled;
};

DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
static atomic_t late_cpus_in, offline_in_nmi;
static unsigned int loops_per_usec;
static cpumask_t cpu_offline_mask;

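/*
 * Rendezvous helper: decrement @cnt for this CPU and spin until all CPUs
 * have checked in or roughly one second has elapsed. On timeout the
 * counter is re-incremented so that latecomers cannot make progress and
 * time out as well.
 */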
static noinstr bool wait_for_cpus(atomic_t *cnt)
{
	unsigned int timeout, loops;

	WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0);

	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
		if (!raw_atomic_read(cnt))
			return true;

		for (loops = 0; loops < loops_per_usec; loops++)
			cpu_relax();

		/* If invoked directly, tickle the NMI watchdog */
		if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
			instrumentation_begin();
			touch_nmi_watchdog();
			instrumentation_end();
		}
	}
	/* Prevent the late comers from making progress and let them time out */
	raw_atomic_inc(cnt);
	return false;
}

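/*
 * Spin until the per CPU control state leaves SCTRL_WAIT or roughly one
 * second has elapsed. Returns true if the control CPU changed the state.
 */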
static noinstr bool wait_for_ctrl(void)
{
	unsigned int timeout, loops;

	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
		if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
			return true;

		for (loops = 0; loops < loops_per_usec; loops++)
			cpu_relax();

		/* If invoked directly, tickle the NMI watchdog */
		if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
			instrumentation_begin();
			touch_nmi_watchdog();
			instrumentation_end();
		}
	}
	return false;
}

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr bool load_secondary_wait(unsigned int ctrl_cpu)
{
	/* Initial rendezvous to ensure that all CPUs have arrived */
	if (!wait_for_cpus(&late_cpus_in)) {
		raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
		return false;
	}

	/*
	 * Wait for primary threads to complete. If one of them hangs due
	 * to the update, there is no way out. This is non-recoverable
	 * because the CPU might hold locks or resources and confuse the
	 * scheduler, watchdogs etc. There is no way to safely evacuate the
	 * machine.
	 */
	if (wait_for_ctrl())
		return true;

	instrumentation_begin();
	panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu);
	instrumentation_end();
}

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr void load_secondary(unsigned int cpu)
{
	unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu);
	enum ucode_state ret;

	if (!load_secondary_wait(ctrl_cpu)) {
		instrumentation_begin();
		pr_err_once("load: %d CPUs timed out\n",
			    atomic_read(&late_cpus_in) - 1);
		instrumentation_end();
		return;
	}

	/* Primary thread completed. Allow invoking instrumentable code */
	instrumentation_begin();
	/*
	 * If the primary succeeded then invoke the apply() callback,
	 * otherwise copy the state from the primary thread.
	 */
	if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
		ret = microcode_ops->apply_microcode(cpu);
	else
		ret = per_cpu(ucode_ctrl.result, ctrl_cpu);

	this_cpu_write(ucode_ctrl.result, ret);
	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
	instrumentation_end();
}

static void __load_primary(unsigned int cpu)
{
	struct cpumask *secondaries = topology_sibling_cpumask(cpu);
	enum sibling_ctrl ctrl;
	enum ucode_state ret;
	unsigned int sibling;

	/* Initial rendezvous to ensure that all CPUs have arrived */
	if (!wait_for_cpus(&late_cpus_in)) {
		this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
		pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1);
		return;
	}

	ret = microcode_ops->apply_microcode(cpu);
	this_cpu_write(ucode_ctrl.result, ret);
	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);

	/*
	 * If the update was successful, let the siblings run the apply()
	 * callback. If not, tell them it's done. This also covers the
	 * case where the CPU has uniform loading at package or system
	 * scope implemented but does not advertise it.
	 */
	if (ret == UCODE_UPDATED || ret == UCODE_OK)
		ctrl = SCTRL_APPLY;
	else
		ctrl = SCTRL_DONE;

	for_each_cpu(sibling, secondaries) {
		if (sibling != cpu)
			per_cpu(ucode_ctrl.ctrl, sibling) = ctrl;
	}
}

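/*
 * Send an NMI to all soft-offlined SMT siblings and wait up to half a
 * second for them to check in via the offline NMI stub handler.
 */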
static bool kick_offline_cpus(unsigned int nr_offl)
{
	unsigned int cpu, timeout;

	for_each_cpu(cpu, &cpu_offline_mask) {
		/* Enable the rendezvous handler and send NMI */
		per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
		apic_send_nmi_to_offline_cpu(cpu);
	}

	/* Wait for them to arrive */
	for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) {
		if (atomic_read(&offline_in_nmi) == nr_offl)
			return true;
		udelay(1);
	}
	/* Let the others time out */
	return false;
}

static void release_offline_cpus(void)
{
	unsigned int cpu;

	for_each_cpu(cpu, &cpu_offline_mask)
		per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE;
}

static void load_primary(unsigned int cpu)
{
	unsigned int nr_offl = cpumask_weight(&cpu_offline_mask);
	bool proceed = true;

	/* Kick soft-offlined SMT siblings if required */
	if (!cpu && nr_offl)
		proceed = kick_offline_cpus(nr_offl);

	/* If the soft-offlined CPUs did not respond, abort */
	if (proceed)
		__load_primary(cpu);

	/* Unconditionally release soft-offlined SMT siblings if required */
	if (!cpu && nr_offl)
		release_offline_cpus();
}

/*
 * Minimal stub rendezvous handler for soft-offlined CPUs which participate
 * in the NMI rendezvous to protect against a concurrent NMI on affected
 * CPUs.
 */
void noinstr microcode_offline_nmi_handler(void)
{
	if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
		return;
	raw_cpu_write(ucode_ctrl.nmi_enabled, false);
	raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE);
	raw_atomic_inc(&offline_in_nmi);
	wait_for_ctrl();
}

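/*
 * Common rendezvous handler, invoked either from the NMI handler or
 * directly from stop_machine() context depending on the vendor driver.
 */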
static noinstr bool microcode_update_handler(void)
{
	unsigned int cpu = raw_smp_processor_id();

	if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) {
		instrumentation_begin();
		load_primary(cpu);
		instrumentation_end();
	} else {
		load_secondary(cpu);
	}

	instrumentation_begin();
	touch_nmi_watchdog();
	instrumentation_end();

	return true;
}

/*
 * Protection against instrumentation is required for CPUs which are not
 * safe against an NMI which is delivered to the secondary SMT sibling
 * while the primary thread updates the microcode. Instrumentation can end
 * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI
 * which is the opposite of what the NMI rendezvous is trying to achieve.
 *
 * The primary thread is safe versus instrumentation as the actual
 * microcode update handles this correctly. It's only the sibling code
 * path which must be NMI safe until the primary thread completed the
 * update.
 */
bool noinstr microcode_nmi_handler(void)
{
	if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
		return false;

	raw_cpu_write(ucode_ctrl.nmi_enabled, false);
	return microcode_update_handler();
}

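/*
 * Invoked on each online CPU via stop_machine(). Either arm the NMI
 * rendezvous handler and raise a self-NMI, or call the update handler
 * directly when the vendor driver does not use the NMI path.
 */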
static int load_cpus_stopped(void *unused)
{
	if (microcode_ops->use_nmi) {
		/* Enable the NMI handler and raise NMI */
		this_cpu_write(ucode_ctrl.nmi_enabled, true);
		apic->send_IPI(smp_processor_id(), NMI_VECTOR);
	} else {
		/* Just invoke the handler directly */
		microcode_update_handler();
	}
	return 0;
}

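/*
 * Core of the late loading path: rendezvous all online CPUs via
 * stop_machine(), apply the update on the primary threads and evaluate
 * the per CPU results afterwards.
 */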
static int load_late_stop_cpus(bool is_safe)
{
	unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;
	unsigned int nr_offl, offline = 0;
	int old_rev = boot_cpu_data.microcode;
	struct cpuinfo_x86 prev_info;

	if (!is_safe) {
		pr_err("Late microcode loading without minimal revision check.\n");
		pr_err("You should switch to early loading, if possible.\n");
	}

	atomic_set(&late_cpus_in, num_online_cpus());
	atomic_set(&offline_in_nmi, 0);
	loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000);

	/*
	 * Take a snapshot before the microcode update in order to compare and
	 * check whether any bits changed after an update.
	 */
	store_cpu_caps(&prev_info);

	if (microcode_ops->use_nmi)
		static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);

	stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask);

	if (microcode_ops->use_nmi)
		static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);

	/* Analyze the results */
	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
		switch (per_cpu(ucode_ctrl.result, cpu)) {
		case UCODE_UPDATED:	updated++; break;
		case UCODE_TIMEOUT:	timedout++; break;
		case UCODE_OK:		siblings++; break;
		case UCODE_OFFLINE:	offline++; break;
		default:		failed++; break;
		}
	}

	if (microcode_ops->finalize_late_load)
		microcode_ops->finalize_late_load(!updated);

	if (!updated) {
		/* Nothing changed. */
		if (!failed && !timedout)
			return 0;

		nr_offl = cpumask_weight(&cpu_offline_mask);
		if (offline < nr_offl) {
			pr_warn("%u offline siblings did not respond.\n",
				nr_offl - atomic_read(&offline_in_nmi));
			return -EIO;
		}
		pr_err("update failed: %u CPUs failed %u CPUs timed out\n",
		       failed, timedout);
		return -EIO;
	}

	if (!is_safe || failed || timedout)
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);

	pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings);
	if (failed || timedout) {
		pr_err("load incomplete. %u CPUs timed out or failed\n",
		       num_online_cpus() - (updated + siblings));
	}
	pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode);
	microcode_check(&prev_info);

	return updated + siblings == num_online_cpus() ? 0 : -EIO;
}

/*
 * This function does two things:
 *
 * 1) Ensure that all required CPUs which are present and have been booted
 *    once are online.
 *
 *    To pass this check, all primary threads must be online.
 *
 *    If the microcode load is not safe against NMI then all SMT threads
 *    must be online as well because they still react to NMIs when they are
 *    soft-offlined and parked in one of the play_dead() variants. So if an
 *    NMI hits while the primary thread updates the microcode the resulting
 *    behaviour is undefined. The default play_dead() implementation on
 *    modern CPUs uses MWAIT, which is also not guaranteed to be safe
 *    against a microcode update which affects MWAIT.
 *
 *    As soft-offlined CPUs still react to NMIs, the SMT sibling
 *    restriction can be lifted when the vendor driver signals to use NMI
 *    for rendezvous and the APIC provides a mechanism to send an NMI to a
 *    soft-offlined CPU. The soft-offlined CPUs are then able to
 *    participate in the rendezvous in a trivial stub handler.
 *
 * 2) Initialize the per CPU control structure and create a cpumask
 *    which contains "offline" secondary threads, so they can be handled
 *    correctly by a control CPU.
 */
static bool setup_cpus(void)
{
	struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, };
	bool allow_smt_offline;
	unsigned int cpu;

	allow_smt_offline = microcode_ops->nmi_safe ||
		(microcode_ops->use_nmi && apic->nmi_to_offline_cpu);

	cpumask_clear(&cpu_offline_mask);

	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
		/*
		 * Offline CPUs sit in one of the play_dead() functions
		 * with interrupts disabled, but they still react to NMIs
		 * and execute arbitrary code. Also MWAIT being updated
		 * while the offline CPU sits there is not necessarily safe
		 * on all CPU variants.
		 *
		 * Mark them in the offline_cpus mask which will be handled
		 * by CPU0 later in the update process.
		 *
		 * Ensure that the primary thread is online so that it is
		 * guaranteed that all cores are updated.
		 */
		if (!cpu_online(cpu)) {
			if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
				pr_err("CPU %u not online, loading aborted\n", cpu);
				return false;
			}
			cpumask_set_cpu(cpu, &cpu_offline_mask);
			per_cpu(ucode_ctrl, cpu) = ctrl;
			continue;
		}

		/*
		 * Initialize the per CPU state. This is core scope for now,
		 * but prepared to take package or system scope into account.
		 */
		ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
		per_cpu(ucode_ctrl, cpu) = ctrl;
	}
	return true;
}

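/* Called with cpus_read_lock() held from the reload sysfs handler. */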
static int load_late_locked(void)
{
	if (!setup_cpus())
		return -EBUSY;

	switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
	case UCODE_NEW:
		return load_late_stop_cpus(false);
	case UCODE_NEW_SAFE:
		return load_late_stop_cpus(true);
	case UCODE_NFOUND:
		return -ENOENT;
	case UCODE_OK:
		return 0;
	default:
		return -EBADFD;
	}
}

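/* Writing "1" to /sys/devices/system/cpu/microcode/reload triggers a late load. */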
static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret || val != 1)
		return -EINVAL;

	cpus_read_lock();
	ret = load_late_locked();
	cpus_read_unlock();

	return ret ? : size;
}

static DEVICE_ATTR_WO(reload);
#endif

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t processor_flags_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(processor_flags);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs	= mc_default_attrs,
	.name	= "microcode",
};

static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}

/**
 * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
void microcode_bsp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->mc)
		microcode_ops->apply_microcode(cpu);
	else
		reload_early_microcode(cpu);
}

static struct syscore_ops mc_syscore_ops = {
	.resume	= microcode_bsp_resume,
};

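/*
 * CPU hotplug callback: collect the microcode revision of the incoming
 * CPU and expose it via the per CPU sysfs group.
 */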
static int mc_cpu_online(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct device *dev = get_cpu_device(cpu);

	memset(uci, 0, sizeof(*uci));

	microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
	cpu_data(cpu).microcode = uci->cpu_sig.rev;
	if (!cpu)
		boot_cpu_data.microcode = uci->cpu_sig.rev;

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);
	return 0;
}

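/* CPU hotplug teardown: drop vendor state and remove the sysfs group. */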
static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
	&dev_attr_reload.attr,
#endif
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};

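/*
 * Driver init: pick the vendor specific ops, register the platform device
 * used by request_firmware() and set up sysfs plus the CPU hotplug callbacks.
 */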
static int __init microcode_init(void)
{
	struct device *dev_root;
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (microcode_loader_disabled())
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev));

	if (early_data.new_rev)
		pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);

	microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		error = sysfs_create_group(&dev_root->kobj, &cpu_root_microcode_group);
		put_device(dev_root);
		if (error) {
			pr_err("Error creating microcode group!\n");
			goto out_pdev;
		}
	}

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
			  mc_cpu_online, mc_cpu_down_prep);

	return 0;

 out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;
}
late_initcall(microcode_init);