/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_MICROCODE_INTERNAL_H
#define _X86_MICROCODE_INTERNAL_H

#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/microcode.h>

struct device;

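/* Result codes passed between the loader core and the vendor drivers. */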
enum ucode_state {
	UCODE_OK	= 0,
	UCODE_NEW,
	UCODE_NEW_SAFE,
	UCODE_UPDATED,
	UCODE_NFOUND,
	UCODE_ERROR,
	UCODE_TIMEOUT,
	UCODE_OFFLINE,
};

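/*
 * Vendor-specific driver callbacks; an instance is returned by
 * init_amd_microcode() or init_intel_microcode() below.
 */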
struct microcode_ops {
	enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
	void (*microcode_fini_cpu)(int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below run on the target CPU when they are invoked.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state	(*apply_microcode)(int cpu);
	int			(*collect_cpu_info)(int cpu, struct cpu_signature *csig);
	void			(*finalize_late_load)(int result);
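	/*
	 * nmi_safe: the update routine is safe to call from NMI context
	 * use_nmi:  do the late-load rendezvous in NMI context
	 * (see the NMI handling in microcode_core.c)
	 */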
	unsigned int		nmi_safe	: 1,
				use_nmi		: 1;
};

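/*
 * Microcode revisions seen during early loading on the BSP, stashed so
 * they can be reported once the console is available.
 */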
struct early_load_data {
	u32 old_rev;
	u32 new_rev;
};

extern struct early_load_data early_data;
extern struct ucode_cpu_info ucode_cpu_info[];
struct cpio_data find_microcode_in_initrd(const char *path);

#define MAX_UCODE_COUNT 128

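/*
 * CPUID(0) returns the vendor string split across EBX, EDX and ECX, in
 * that order: "GenuineIntel" is EBX="Genu", EDX="ineI", ECX="ntel".
 * QCHAR() packs four ASCII characters into a u32 in little-endian order
 * so that the constants below compare directly against the raw register
 * values; CPUID_IS() then matches all three registers at once.
 */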
#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
		(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is
 * not set up yet, so x86_cpuid_vendor() is used to determine the vendor id.
 *
 * On 32-bit APs, accessing boot_cpu_data would require a linear address.
 * To keep things simple, x86_cpuid_vendor() is used there as well.
 *
 * x86_cpuid_vendor() reads the vendor information directly via CPUID.
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

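/*
 * CPUID(1) returns the family/model/stepping signature in EAX;
 * x86_family() extracts the base family and folds in the extended
 * family bits when the base family is 0xf.
 */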
static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}

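/*
 * dis_ucode_ldr: microcode loading disabled on the kernel command line
 * force_minrev:  enforce the minimal revision check for late loading
 *		  ("microcode.force_minrev")
 */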
extern bool dis_ucode_ldr;
extern bool force_minrev;

#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }
#endif /* !CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL
void load_ucode_intel_bsp(struct early_load_data *ed);
void load_ucode_intel_ap(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
static inline void load_ucode_intel_ap(void) { }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */

#endif /* _X86_MICROCODE_INTERNAL_H */