xref: /linux/arch/x86/kernel/cpu/mce/internal.h (revision 0678df8271820bcf8fb4f877129f05d68a237de4)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __X86_MCE_INTERNAL_H__
3 #define __X86_MCE_INTERNAL_H__
4 
5 #undef pr_fmt
6 #define pr_fmt(fmt) "mce: " fmt
7 
8 #include <linux/device.h>
9 #include <asm/mce.h>
10 
/*
 * Internal severity grading for a machine-check record. Higher values
 * are more severe (see mce_severity() below, which produces one of
 * these for a given record).
 */
enum severity_level {
	MCE_NO_SEVERITY,
	MCE_DEFERRED_SEVERITY,
	/* UCNA errors are graded the same as deferred errors. */
	MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,
	MCE_KEEP_SEVERITY,
	MCE_SOME_SEVERITY,
	MCE_AO_SEVERITY,
	MCE_UC_SEVERITY,
	MCE_AR_SEVERITY,
	MCE_PANIC_SEVERITY,
};
22 
23 extern struct blocking_notifier_head x86_mce_decoder_chain;
24 
/*
 * Parenthesized so the macro expands safely inside larger expressions:
 * without the parens, "x / INITIAL_CHECK_INTERVAL" would expand to
 * "x / 5 * 60" and give the wrong result.
 */
#define INITIAL_CHECK_INTERVAL	(5 * 60) /* 5 minutes */
26 
/*
 * One MCE record threaded on the lock-less list managed by the
 * mce_gen_pool_*() functions below.
 */
struct mce_evt_llist {
	struct llist_node llnode;
	struct mce mce;
};
31 
32 void mce_gen_pool_process(struct work_struct *__unused);
33 bool mce_gen_pool_empty(void);
34 int mce_gen_pool_add(struct mce *mce);
35 int mce_gen_pool_init(void);
36 struct llist_node *mce_gen_pool_prepare_records(void);
37 
38 int mce_severity(struct mce *a, struct pt_regs *regs, char **msg, bool is_excp);
39 struct dentry *mce_get_debugfs_dir(void);
40 
41 extern mce_banks_t mce_banks_ce_disabled;
42 
#ifdef CONFIG_X86_MCE_INTEL
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
void intel_init_lmce(void);
void intel_clear_lmce(void);
bool intel_filter_mce(struct mce *m);
bool intel_mce_usable_address(struct mce *m);
#else
/*
 * Stubs for builds without Intel MCE support: init/update hooks are
 * no-ops and the poll/filter/address predicates all report false.
 */
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
static inline void intel_clear_lmce(void) { }
static inline bool intel_filter_mce(struct mce *m) { return false; }
static inline bool intel_mce_usable_address(struct mce *m) { return false; }
#endif
64 
65 void mce_timer_kick(unsigned long interval);
66 
#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
/*
 * Without APEI there is no persistent MCE storage: writes and clears
 * fail with -EINVAL, while reads and checks report zero records.
 */
static inline int apei_write_mce(struct mce *m)
{
	return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	return 0;
}
static inline int apei_check_mce(void)
{
	return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
	return -EINVAL;
}
#endif
90 
91 /*
92  * We consider records to be equivalent if bank+status+addr+misc all match.
93  * This is only used when the system is going down because of a fatal error
94  * to avoid cluttering the console log with essentially repeated information.
95  * In normal processing all errors seen are logged.
96  */
97 static inline bool mce_cmp(struct mce *m1, struct mce *m2)
98 {
99 	return m1->bank != m2->bank ||
100 		m1->status != m2->status ||
101 		m1->addr != m2->addr ||
102 		m1->misc != m2->misc;
103 }
104 
105 extern struct device_attribute dev_attr_trigger;
106 
#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
/* No legacy /dev/mcelog support: trigger and injector hooks are no-ops. */
static inline void mce_work_trigger(void)	{ }
static inline void mce_register_injector_chain(struct notifier_block *nb)	{ }
static inline void mce_unregister_injector_chain(struct notifier_block *nb)	{ }
#endif
116 
/*
 * Global machine-check configuration state (single instance: mca_cfg,
 * declared below). NOTE(review): the flags appear to be driven by boot
 * parameters and detected CPU capabilities — confirm against the users
 * of mca_cfg in the mce core.
 */
struct mca_config {
	__u64 lmce_disabled		: 1,	/* local MCE (LMCE) use disabled */
	      disabled			: 1,	/* machine check support disabled */
	      ser			: 1,	/* software error recovery supported */
	      recovery			: 1,	/* recovery mode requested */
	      bios_cmci_threshold	: 1,	/* keep BIOS-set CMCI threshold */
	      /* Proper #MC exception handler is set */
	      initialized		: 1,
	      __reserved		: 58;

	bool dont_log_ce;	/* don't log corrected errors */
	bool cmci_disabled;	/* CMCI interrupt mode disabled */
	bool ignore_ce;		/* ignore corrected errors entirely */
	bool print_all;		/* print all (not just fatal) records */

	int monarch_timeout;	/* how long to wait for other CPUs in #MC */
	int panic_timeout;	/* delay before panic reboot */
	u32 rip_msr;		/* MSR holding the faulting RIP, if any */
	s8 bootlog;		/* log boot-time (left-over) errors? (-1 = default) */
};
137 
138 extern struct mca_config mca_cfg;
139 DECLARE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
140 
/* Vendor-specific MCA capability and quirk flags (single instance: mce_flags). */
struct mce_vendor_flags {
	/*
	 * Indicates that overflow conditions are not fatal, when set.
	 */
	__u64 overflow_recov	: 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	succor			: 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
	 * the register space for each MCA bank and also increases number of
	 * banks. Also, to accommodate the new banks and registers, the MCA
	 * register space is moved to a new MSR range.
	 */
	smca			: 1,

	/* Zen IFU quirk */
	zen_ifu_quirk		: 1,

	/* AMD-style error thresholding banks present. */
	amd_threshold		: 1,

	/* Pentium, family 5-style MCA */
	p5			: 1,

	/* Centaur Winchip C6-style MCA */
	winchip			: 1,

	/* SandyBridge IFU quirk */
	snb_ifu_quirk		: 1,

	/* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
	skx_repmov_quirk	: 1,

	__reserved_0		: 55;
};
182 
183 extern struct mce_vendor_flags mce_flags;
184 
/*
 * Per-bank control state; one entry per bank in the per-CPU
 * mce_banks_array declared below.
 */
struct mce_bank {
	/* subevents to enable */
	u64			ctl;

	/* initialise bank? */
	__u64 init		: 1,

	/*
	 * (AMD) MCA_CONFIG[McaLsbInStatusSupported]: When set, this bit indicates
	 * the LSB field is found in MCA_STATUS and not in MCA_ADDR.
	 */
	lsb_in_status		: 1,

	__reserved_1		: 62;
};
200 
201 DECLARE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
202 
/*
 * Logical names for the per-bank MCA registers; mapped to the actual
 * MSR number (legacy IA32 or SMCA layout) by mca_msr_reg() below.
 */
enum mca_msr {
	MCA_CTL,
	MCA_STATUS,
	MCA_ADDR,
	MCA_MISC,
};
209 
210 /* Decide whether to add MCE record to MCE event pool or filter it out. */
211 extern bool filter_mce(struct mce *m);
212 
213 #ifdef CONFIG_X86_MCE_AMD
214 extern bool amd_filter_mce(struct mce *m);
215 bool amd_mce_usable_address(struct mce *m);
216 
217 /*
218  * If MCA_CONFIG[McaLsbInStatusSupported] is set, extract ErrAddr in bits
219  * [56:0] of MCA_STATUS, else in bits [55:0] of MCA_ADDR.
220  */
221 static __always_inline void smca_extract_err_addr(struct mce *m)
222 {
223 	u8 lsb;
224 
225 	if (!mce_flags.smca)
226 		return;
227 
228 	if (this_cpu_ptr(mce_banks_array)[m->bank].lsb_in_status) {
229 		lsb = (m->status >> 24) & 0x3f;
230 
231 		m->addr &= GENMASK_ULL(56, lsb);
232 
233 		return;
234 	}
235 
236 	lsb = (m->addr >> 56) & 0x3f;
237 
238 	m->addr &= GENMASK_ULL(55, lsb);
239 }
240 
#else
/* No AMD MCE support: nothing to filter and no SMCA address extraction. */
static inline bool amd_filter_mce(struct mce *m) { return false; }
static inline bool amd_mce_usable_address(struct mce *m) { return false; }
static inline void smca_extract_err_addr(struct mce *m) { }
#endif
246 
#ifdef CONFIG_X86_ANCIENT_MCE
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
noinstr void pentium_machine_check(struct pt_regs *regs);
noinstr void winchip_machine_check(struct pt_regs *regs);
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
#else
/* Pentium/WinChip-era MCA compiled out: all hooks collapse to no-ops. */
static __always_inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static __always_inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static __always_inline void enable_p5_mce(void) {}
static __always_inline void pentium_machine_check(struct pt_regs *regs) {}
static __always_inline void winchip_machine_check(struct pt_regs *regs) {}
#endif
260 
261 noinstr u64 mce_rdmsrl(u32 msr);
262 
263 static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
264 {
265 	if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
266 		switch (reg) {
267 		case MCA_CTL:	 return MSR_AMD64_SMCA_MCx_CTL(bank);
268 		case MCA_ADDR:	 return MSR_AMD64_SMCA_MCx_ADDR(bank);
269 		case MCA_MISC:	 return MSR_AMD64_SMCA_MCx_MISC(bank);
270 		case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
271 		}
272 	}
273 
274 	switch (reg) {
275 	case MCA_CTL:	 return MSR_IA32_MCx_CTL(bank);
276 	case MCA_ADDR:	 return MSR_IA32_MCx_ADDR(bank);
277 	case MCA_MISC:	 return MSR_IA32_MCx_MISC(bank);
278 	case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
279 	}
280 
281 	return 0;
282 }
283 
284 extern void (*mc_poll_banks)(void);
285 #endif /* __X86_MCE_INTERNAL_H__ */
286