/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_MICROCODE_INTERNAL_H
#define _X86_MICROCODE_INTERNAL_H

#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/microcode.h>

struct device;

enum ucode_state {
	UCODE_OK	= 0,
	UCODE_NEW,
	UCODE_NEW_SAFE,
	UCODE_UPDATED,
	UCODE_NFOUND,
	UCODE_ERROR,
	UCODE_TIMEOUT,
	UCODE_OFFLINE,
};

struct microcode_ops {
	enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
	void (*microcode_fini_cpu)(int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below run on the target CPU when they are called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state (*apply_microcode)(int cpu);
	int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
	void (*finalize_late_load)(int result);
	unsigned int nmi_safe	: 1,
		     use_nmi	: 1;
};

extern struct ucode_cpu_info ucode_cpu_info[];
struct cpio_data find_microcode_in_initrd(const char *path);

#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

#define CPUID_IS(a, b, c, ebx, ecx, edx) \
	(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is not
 * set up yet, so x86_cpuid_vendor() is used to get the vendor id for the BSP.
 *
 * In the 32-bit AP case, accessing boot_cpu_data would need a linear address.
 * To keep the code simple, x86_cpuid_vendor() is used to get the vendor id
 * for APs as well.
 *
 * x86_cpuid_vendor() reads the vendor information directly from CPUID.
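 *
 * Note that CPUID leaf 0 returns the vendor string in EBX, EDX, ECX (in that
 * order), which is why CPUID_IS() compares (ebx, edx, ecx) against the three
 * QCHAR()-packed words of the vendor string.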
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}

extern bool dis_ucode_ldr;
extern bool force_minrev;

#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(unsigned int family);
void load_ucode_amd_ap(unsigned int family);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }
#endif /* !CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL
void load_ucode_intel_bsp(void);
void load_ucode_intel_ap(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(void) { }
static inline void load_ucode_intel_ap(void) { }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */

#endif /* _X86_MICROCODE_INTERNAL_H */