xref: /linux/arch/powerpc/kernel/mce_power.c (revision 58f2c391cc0560231d7636c39d31b1b26c9396b7)
/*
 * Machine check exception handling CPU-side for power7 and power8
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce_power: " fmt

#include <linux/types.h>
#include <linux/ptrace.h>
#include <asm/mmu.h>
#include <asm/mce.h>
#include <asm/machdep.h>

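/*
 * Flush the local TLB one congruence-class set at a time, as the pre-ISA 3.0
 * cores (POWER7/POWER8) require: rb starts at the base invalidation value
 * for the requested scope and is advanced by one set per tlbiel via
 * TLBIEL_INVAL_SET_SHIFT.
 */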
static void flush_tlb_206(unsigned int num_sets, unsigned int action)
{
	unsigned long rb;
	unsigned int i;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		rb = TLBIEL_INVAL_SET;
		break;
	case TLB_INVAL_SCOPE_LPID:
		rb = TLBIEL_INVAL_SET_LPID;
		break;
	default:
		BUG();
		break;
	}

	asm volatile("ptesync" : : : "memory");
	for (i = 0; i < num_sets; i++) {
		asm volatile("tlbiel %0" : : "r" (rb));
		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
	}
	asm volatile("ptesync" : : : "memory");
}

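/*
 * ISA 3.0 (POWER9) variant. The PPC_TLBIEL() arguments below correspond to
 * RB, RS, RIC, PRS and R: RIC=2 invalidates the TLB together with the
 * page-walk/table caches, PRS selects partition- (0) vs process- (1) scoped
 * entries, and R=1 is used when radix translation is enabled, matching the
 * flow described in the comment inside the function.
 */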
static void flush_tlb_300(unsigned int num_sets, unsigned int action)
{
	unsigned long rb;
	unsigned int i;
	unsigned int r;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		rb = TLBIEL_INVAL_SET;
		break;
	case TLB_INVAL_SCOPE_LPID:
		rb = TLBIEL_INVAL_SET_LPID;
		break;
	default:
		BUG();
		break;
	}

	asm volatile("ptesync" : : : "memory");

	if (early_radix_enabled())
		r = 1;
	else
		r = 0;

	/*
	 * First flush table/PWC caches with set 0, then flush the
	 * rest of the sets, partition scope. Radix must then do it
	 * all again with process scope. Hash just has to flush
	 * process table.
	 */
	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
			"r"(rb), "r"(0), "i"(2), "i"(0), "r"(r));
	for (i = 1; i < num_sets; i++) {
		unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);

		asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
				"r"(rb+set), "r"(0), "i"(2), "i"(0), "r"(r));
	}

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
			"r"(rb), "r"(0), "i"(2), "i"(1), "r"(r));
	if (early_radix_enabled()) {
		for (i = 1; i < num_sets; i++) {
			unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);

			asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
				"r"(rb+set), "r"(0), "i"(2), "i"(1), "r"(r));
		}
	}

	asm volatile("ptesync" : : : "memory");
}

/*
 * Generic routines to flush the TLB on POWER processors. These are used as
 * the flush_tlb hook in cpu_spec.
 *
 * action => TLB_INVAL_SCOPE_GLOBAL:  Invalidate all TLBs.
 *	     TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
 */
void __flush_tlb_power7(unsigned int action)
{
	flush_tlb_206(POWER7_TLB_SETS, action);
}

void __flush_tlb_power8(unsigned int action)
{
	flush_tlb_206(POWER8_TLB_SETS, action);
}

void __flush_tlb_power9(unsigned int action)
{
	unsigned int num_sets;

	if (radix_enabled())
		num_sets = POWER9_TLB_SETS_RADIX;
	else
		num_sets = POWER9_TLB_SETS_HASH;

	flush_tlb_300(num_sets, action);
}


/* flush SLBs and reload */
#ifdef CONFIG_PPC_STD_MMU_64
static void flush_and_reload_slb(void)
{
	struct slb_shadow *slb;
	unsigned long i, n;

	/* Invalidate all SLBs */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));

#ifdef CONFIG_KVM_BOOK3S_HANDLER
	/*
	 * If the machine check hit while we were in a guest, or in
	 * transition to/from the guest, only flush the SLBs and continue.
	 */
	if (get_paca()->kvm_hstate.in_guest)
		return;
#endif

	/* For host kernel, reload the SLBs from shadow SLB buffer. */
	slb = get_slb_shadow();
	if (!slb)
		return;

	n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);

	/* Load up the SLB entries from shadow SLB */
	for (i = 0; i < n; i++) {
		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
		unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);

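		/*
		 * slbmte takes the SLB entry index in the low bits of RB, so
		 * mask off the low 12 bits of the shadow ESID and OR in the
		 * entry number before writing the entry back.
		 */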
		rb = (rb & ~0xFFFul) | i;
		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
	}
}
#endif

static void flush_erat(void)
{
	asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}

#define MCE_FLUSH_SLB 1
#define MCE_FLUSH_TLB 2
#define MCE_FLUSH_ERAT 3

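/*
 * Carry out the requested flush if this kernel/CPU combination supports it.
 * Returns 1 when the flush was performed (so the caller can treat the error
 * as handled), 0 otherwise.
 */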
static int mce_flush(int what)
{
#ifdef CONFIG_PPC_STD_MMU_64
	if (what == MCE_FLUSH_SLB) {
		flush_and_reload_slb();
		return 1;
	}
#endif
	if (what == MCE_FLUSH_ERAT) {
		flush_erat();
		return 1;
	}
	if (what == MCE_FLUSH_TLB) {
		if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
			cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
			return 1;
		}
	}

	return 0;
}

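/*
 * SRR1 bit 42 (IBM numbering) is set when the machine check was triggered by
 * a load/store rather than an instruction fetch; it is used below to choose
 * between the D-side (DSISR based) and I-side (SRR1 based) decode tables.
 */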
#define SRR1_MC_LOADSTORE(srr1)	((srr1) & PPC_BIT(42))

struct mce_ierror_table {
	unsigned long srr1_mask;
	unsigned long srr1_value;
	bool nip_valid; /* nip is a valid indicator of faulting address */
	unsigned int error_type;
	unsigned int error_subtype;
	unsigned int initiator;
	unsigned int severity;
};

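/*
 * I-side decode tables: the first entry whose (SRR1 & srr1_mask) equals
 * srr1_value describes the error. A recovery attempt (SLB/TLB/ERAT flush) is
 * made based on error_type before the event is reported.
 */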
static const struct mce_ierror_table mce_p7_ierror_table[] = {
{ 0x00000000001c0000, 0x0000000000040000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000080000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x00000000000c0000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000100000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000140000, true,
  MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000180000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x00000000001c0000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0, 0, 0, 0, 0, 0 } };

static const struct mce_ierror_table mce_p8_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000080000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000000c0000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000100000, true,
  MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000140000, true,
  MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000180000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000001c0000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008000000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008040000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0, 0, 0, 0, 0, 0 } };

static const struct mce_ierror_table mce_p9_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000080000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000000c0000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000100000, true,
  MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000140000, true,
  MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000180000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000001c0000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_IFETCH_FOREIGN,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008000000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008040000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000080c0000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008100000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008140000, false,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_STORE,
  MCE_INITIATOR_CPU,  MCE_SEV_FATAL, }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x0000000008180000, false,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_STORE_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_FATAL, }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x00000000081c0000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0, 0, 0, 0, 0, 0 } };

struct mce_derror_table {
	unsigned long dsisr_value;
	bool dar_valid; /* dar is a valid indicator of faulting address */
	unsigned int error_type;
	unsigned int error_subtype;
	unsigned int initiator;
	unsigned int severity;
};

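/*
 * D-side decode tables are keyed on individual DSISR bits. Several bits may
 * be set at once: every matching entry gets a recovery attempt, but only the
 * first match is reported, so more severe errors must appear earlier in the
 * table.
 */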
static const struct mce_derror_table mce_p7_derror_table[] = {
{ 0x00008000, false,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00004000, true,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000800, true,
  MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000400, true,
  MCE_ERROR_TYPE_TLB,  MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000080, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000040, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0, false, 0, 0, 0, 0 } };

static const struct mce_derror_table mce_p8_derror_table[] = {
{ 0x00008000, false,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00004000, true,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00002000, true,
  MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00001000, true,
  MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000800, true,
  MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000400, true,
  MCE_ERROR_TYPE_TLB,  MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000200, true,
  MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, /* SECONDARY ERAT */
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000080, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0, false, 0, 0, 0, 0 } };

static const struct mce_derror_table mce_p9_derror_table[] = {
{ 0x00008000, false,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00004000, true,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00002000, true,
  MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00001000, true,
  MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000800, true,
  MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000400, true,
  MCE_ERROR_TYPE_TLB,  MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000200, false,
  MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000080, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000040, true,
  MCE_ERROR_TYPE_RA,   MCE_RA_ERROR_LOAD,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000020, false,
  MCE_ERROR_TYPE_RA,   MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000010, false,
  MCE_ERROR_TYPE_RA,   MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000008, false,
  MCE_ERROR_TYPE_RA,   MCE_RA_ERROR_LOAD_STORE_FOREIGN,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0, false, 0, 0, 0, 0 } };

static int mce_handle_ierror(struct pt_regs *regs,
		const struct mce_ierror_table table[],
		struct mce_error_info *mce_err, uint64_t *addr)
{
	uint64_t srr1 = regs->msr;
	int handled = 0;
	int i;

	*addr = 0;

	for (i = 0; table[i].srr1_mask; i++) {
		if ((srr1 & table[i].srr1_mask) != table[i].srr1_value)
			continue;

		/* attempt to correct the error */
		switch (table[i].error_type) {
		case MCE_ERROR_TYPE_SLB:
			handled = mce_flush(MCE_FLUSH_SLB);
			break;
		case MCE_ERROR_TYPE_ERAT:
			handled = mce_flush(MCE_FLUSH_ERAT);
			break;
		case MCE_ERROR_TYPE_TLB:
			handled = mce_flush(MCE_FLUSH_TLB);
			break;
		}

		/* now fill in mce_error_info */
		mce_err->error_type = table[i].error_type;
		switch (table[i].error_type) {
		case MCE_ERROR_TYPE_UE:
			mce_err->u.ue_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_SLB:
			mce_err->u.slb_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_ERAT:
			mce_err->u.erat_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_TLB:
			mce_err->u.tlb_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_USER:
			mce_err->u.user_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_RA:
			mce_err->u.ra_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_LINK:
			mce_err->u.link_error_type = table[i].error_subtype;
			break;
		}
		mce_err->severity = table[i].severity;
		mce_err->initiator = table[i].initiator;
		if (table[i].nip_valid)
			*addr = regs->nip;
		return handled;
	}

	mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
	mce_err->severity = MCE_SEV_ERROR_SYNC;
	mce_err->initiator = MCE_INITIATOR_CPU;

	return 0;
}

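/*
 * D-side counterpart of mce_handle_ierror(): decode DSISR against the table,
 * attempt recovery for every matching condition, and report the first match,
 * using DAR (rather than NIP) as the faulting address when the entry marks
 * it valid.
 */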
static int mce_handle_derror(struct pt_regs *regs,
		const struct mce_derror_table table[],
		struct mce_error_info *mce_err, uint64_t *addr)
{
	uint64_t dsisr = regs->dsisr;
	int handled = 0;
	int found = 0;
	int i;

	*addr = 0;

	for (i = 0; table[i].dsisr_value; i++) {
		if (!(dsisr & table[i].dsisr_value))
			continue;

		/* attempt to correct the error */
		switch (table[i].error_type) {
		case MCE_ERROR_TYPE_SLB:
			if (mce_flush(MCE_FLUSH_SLB))
				handled = 1;
			break;
		case MCE_ERROR_TYPE_ERAT:
			if (mce_flush(MCE_FLUSH_ERAT))
				handled = 1;
			break;
		case MCE_ERROR_TYPE_TLB:
			if (mce_flush(MCE_FLUSH_TLB))
				handled = 1;
			break;
		}

		/*
		 * Attempt recovery for every matching condition, but only
		 * report one. Uncorrectable errors must come first in the
		 * table so that they are the ones reported.
		 */
		if (found)
			continue;

		/* now fill in mce_error_info */
		mce_err->error_type = table[i].error_type;
		switch (table[i].error_type) {
		case MCE_ERROR_TYPE_UE:
			mce_err->u.ue_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_SLB:
			mce_err->u.slb_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_ERAT:
			mce_err->u.erat_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_TLB:
			mce_err->u.tlb_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_USER:
			mce_err->u.user_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_RA:
			mce_err->u.ra_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_LINK:
			mce_err->u.link_error_type = table[i].error_subtype;
			break;
		}
		mce_err->severity = table[i].severity;
		mce_err->initiator = table[i].initiator;
		if (table[i].dar_valid)
			*addr = regs->dar;

		found = 1;
	}

	if (found)
		return handled;

	mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
	mce_err->severity = MCE_SEV_ERROR_SYNC;
	mce_err->initiator = MCE_INITIATOR_CPU;

	return 0;
}

static long mce_handle_ue_error(struct pt_regs *regs)
{
	long handled = 0;

	/*
	 * On certain SCOM reads via MMIO we may get a machine check
	 * exception with SRR0 pointing inside OPAL. In that case OPAL may
	 * have a recovery address that re-reads the SCOM data in a
	 * different way, and hence we can recover from this MC.
	 */

	if (ppc_md.mce_check_early_recovery) {
		if (ppc_md.mce_check_early_recovery(regs))
			handled = 1;
	}
	return handled;
}

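/*
 * Common machine check handling: dispatch to the D-side or I-side decoder
 * based on SRR1, fall back to the platform's early-recovery hook for
 * unhandled UE errors (e.g. OPAL-assisted SCOM recovery), then record the
 * event for later processing.
 */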
static long mce_handle_error(struct pt_regs *regs,
		const struct mce_derror_table dtable[],
		const struct mce_ierror_table itable[])
{
	struct mce_error_info mce_err = { 0 };
	uint64_t addr;
	uint64_t srr1 = regs->msr;
	long handled;

	if (SRR1_MC_LOADSTORE(srr1))
		handled = mce_handle_derror(regs, dtable, &mce_err, &addr);
	else
		handled = mce_handle_ierror(regs, itable, &mce_err, &addr);

	if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
		handled = mce_handle_ue_error(regs);

	save_mce_event(regs, handled, &mce_err, regs->nip, addr);

	return handled;
}

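/*
 * Per-CPU-family entry points, called early and in real mode from the
 * machine check handler; they are presumably wired up through the CPU
 * feature table for the matching processor.
 */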
long __machine_check_early_realmode_p7(struct pt_regs *regs)
{
	/* P7 DD1 leaves top bits of DSISR undefined */
	regs->dsisr &= 0x0000ffff;

	return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
}

long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
	return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
}

long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
	/*
	 * On POWER9 DD2.1 and below, it's possible to get a machine check
	 * caused by a paste instruction where only DSISR bit 25 is set. This
	 * will result in the MCE handler seeing an unknown event and the kernel
	 * crashing. An MCE that occurs like this is spurious, so we don't need
	 * to do anything in terms of servicing it. If there is something that
	 * needs to be serviced, the CPU will raise the MCE again with the
	 * correct DSISR so that it can be serviced properly. So detect this
	 * case and mark it as handled.
	 */
	if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
		return 1;

	return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
}