/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H

#include <asm/cpu.h>
#include <linux/earlycpio.h>
#include <linux/initrd.h>

struct ucode_patch {
	struct list_head plist;
	void *data;		/* Intel uses only this one */
	unsigned int size;
	u32 patch_id;
	u16 equiv_cpu;
};

extern struct list_head microcode_cache;

struct cpu_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int rev;
};

struct device;

enum ucode_state {
	UCODE_OK	= 0,
	UCODE_NEW,
	UCODE_UPDATED,
	UCODE_NFOUND,
	UCODE_ERROR,
};

struct microcode_ops {
	enum ucode_state (*request_microcode_fw) (int cpu, struct device *);

	void (*microcode_fini_cpu) (int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that
	 * the callbacks below run on a target cpu when they
	 * are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state (*apply_microcode) (int cpu);
	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};

struct ucode_cpu_info {
	struct cpu_signature	cpu_sig;
	void			*mc;
};
extern struct ucode_cpu_info ucode_cpu_info[];
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);

#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
#else
static inline struct microcode_ops * __init init_intel_microcode(void)
{
	return NULL;
}
#endif /* CONFIG_MICROCODE_INTEL */

#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
#else
static inline struct microcode_ops * __init init_amd_microcode(void)
{
	return NULL;
}
static inline void __exit exit_amd_microcode(void) {}
#endif

#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
		(!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is not
 * set up yet, so x86_cpuid_vendor() is used to get the vendor ID for the BSP.
 *
 * In the 32-bit AP case, accessing boot_cpu_data requires a linear address.
 * To keep the code simple, x86_cpuid_vendor() is used for the APs as well.
 *
 * x86_cpuid_vendor() reads the vendor information directly from CPUID.
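 *
 * For reference: CPUID leaf 0 returns the 12-character vendor string in
 * EBX, EDX, ECX (in that order) -- "Genu", "ineI", "ntel" for GenuineIntel
 * and "Auth", "enti", "cAMD" for AuthenticAMD. CPUID_IS() above compares
 * those three registers against the packed QCHAR() constants.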
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}

#ifdef CONFIG_MICROCODE
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool initrd_gone;
void microcode_bsp_resume(void);
#else
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void reload_early_microcode(void) { }
static inline void microcode_bsp_resume(void) { }
#endif

#endif /* _ASM_X86_MICROCODE_H */
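
/*
 * Usage sketch (illustrative only, not part of this header): the generic
 * core in microcode_core.c selects the vendor driver roughly like this and
 * then drives the per-CPU callbacks declared in struct microcode_ops:
 *
 *	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 *		microcode_ops = init_intel_microcode();
 *	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 *		microcode_ops = init_amd_microcode();
 *
 * The early-boot entry points load_ucode_bsp()/load_ucode_ap() run before
 * boot_cpu_data is set up and therefore rely on x86_cpuid_vendor() and
 * x86_cpuid_family() instead.
 */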