xref: /linux/arch/powerpc/kernel/mce_power.c (revision ba41e1e1ccb9771ce41a3b8e2121f95486e76ac9)
1 /*
2  * Machine check exception handling CPU-side for power7 and power8
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright 2013 IBM Corporation
19  * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
20  */
21 
22 #undef DEBUG
23 #define pr_fmt(fmt) "mce_power: " fmt
24 
25 #include <linux/types.h>
26 #include <linux/ptrace.h>
27 #include <asm/mmu.h>
28 #include <asm/mce.h>
29 #include <asm/machdep.h>
30 #include <asm/pgtable.h>
31 #include <asm/pte-walk.h>
32 #include <asm/sstep.h>
33 #include <asm/exception-64s.h>
34 
/*
 * Convert an address related to an mm to a PFN. NOTE: we are in real
 * mode, we could potentially race with page table updates.
 *
 * Returns the PFN backing @addr, or ULONG_MAX if no usable translation
 * exists (no PTE, or a special mapping with no struct page).
 */
static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
{
	pte_t *ptep;
	unsigned long flags;
	struct mm_struct *mm;

	/*
	 * A user-mode fault is translated through the current task's mm;
	 * kernel-mode addresses are looked up in init_mm.
	 */
	if (user_mode(regs))
		mm = current->mm;
	else
		mm = &init_mm;

	/* Disable interrupts around the walk to narrow the race window. */
	local_irq_save(flags);
	if (mm == current->mm)
		ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL);
	else
		ptep = find_init_mm_pte(addr, NULL);
	local_irq_restore(flags);
	if (!ptep || pte_special(*ptep))
		return ULONG_MAX;
	return pte_pfn(*ptep);
}
60 
61 static void flush_tlb_206(unsigned int num_sets, unsigned int action)
62 {
63 	unsigned long rb;
64 	unsigned int i;
65 
66 	switch (action) {
67 	case TLB_INVAL_SCOPE_GLOBAL:
68 		rb = TLBIEL_INVAL_SET;
69 		break;
70 	case TLB_INVAL_SCOPE_LPID:
71 		rb = TLBIEL_INVAL_SET_LPID;
72 		break;
73 	default:
74 		BUG();
75 		break;
76 	}
77 
78 	asm volatile("ptesync" : : : "memory");
79 	for (i = 0; i < num_sets; i++) {
80 		asm volatile("tlbiel %0" : : "r" (rb));
81 		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
82 	}
83 	asm volatile("ptesync" : : : "memory");
84 }
85 
/*
 * Invalidate TLB and page-walk caches with the ISA 3.00 (POWER9) form of
 * tlbiel, for both partition and process scope as required by the MMU mode.
 *
 * @num_sets: number of TLB sets to invalidate
 * @action:   TLB_INVAL_SCOPE_GLOBAL or TLB_INVAL_SCOPE_LPID; any other
 *            value is a caller bug
 */
static void flush_tlb_300(unsigned int num_sets, unsigned int action)
{
	unsigned long rb;
	unsigned int i;
	unsigned int r;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		rb = TLBIEL_INVAL_SET;
		break;
	case TLB_INVAL_SCOPE_LPID:
		rb = TLBIEL_INVAL_SET_LPID;
		break;
	default:
		BUG();
		break;
	}

	asm volatile("ptesync" : : : "memory");

	/* R field of tlbiel: 1 selects radix invalidation, 0 hash. */
	if (early_radix_enabled())
		r = 1;
	else
		r = 0;

	/*
	 * First flush table/PWC caches with set 0, then flush the
	 * rest of the sets, partition scope. Radix must then do it
	 * all again with process scope. Hash just has to flush
	 * process table.
	 */
	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
			"r"(rb), "r"(0), "i"(2), "i"(0), "r"(r));
	for (i = 1; i < num_sets; i++) {
		unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);

		asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
				"r"(rb+set), "r"(0), "i"(2), "i"(0), "r"(r));
	}

	/* Process-scoped flush of set 0 (flushes the process table cache). */
	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
			"r"(rb), "r"(0), "i"(2), "i"(1), "r"(r));
	if (early_radix_enabled()) {
		/* Radix repeats all remaining sets with process scope. */
		for (i = 1; i < num_sets; i++) {
			unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);

			asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
				"r"(rb+set), "r"(0), "i"(2), "i"(1), "r"(r));
		}
	}

	asm volatile("ptesync" : : : "memory");
}
139 
/*
 * Generic routines to flush TLB on POWER processors. These routines
 * are used as flush_tlb hook in the cpu_spec.
 *
 * action => TLB_INVAL_SCOPE_GLOBAL:  Invalidate all TLBs.
 *	     TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
 */
void __flush_tlb_power7(unsigned int action)
{
	/* POWER7 uses the ISA 2.06 tlbiel sequence over its TLB sets. */
	flush_tlb_206(POWER7_TLB_SETS, action);
}
151 
/* TLB flush hook for POWER8; same tlbiel sequence, POWER8 set count. */
void __flush_tlb_power8(unsigned int action)
{
	flush_tlb_206(POWER8_TLB_SETS, action);
}
156 
157 void __flush_tlb_power9(unsigned int action)
158 {
159 	unsigned int num_sets;
160 
161 	if (early_radix_enabled())
162 		num_sets = POWER9_TLB_SETS_RADIX;
163 	else
164 		num_sets = POWER9_TLB_SETS_HASH;
165 
166 	flush_tlb_300(num_sets, action);
167 }
168 
169 
/* flush SLBs and reload */
#ifdef CONFIG_PPC_STD_MMU_64
/*
 * Invalidate the entire SLB, then (for the host kernel) repopulate it
 * from the shadow SLB buffer so execution can continue after an
 * SLB parity/multihit machine check.
 */
static void flush_and_reload_slb(void)
{
	struct slb_shadow *slb;
	unsigned long i, n;

	/* Invalidate all SLBs */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));

#ifdef CONFIG_KVM_BOOK3S_HANDLER
	/*
	 * If machine check is hit when in guest or in transition, we will
	 * only flush the SLBs and continue.
	 */
	if (get_paca()->kvm_hstate.in_guest)
		return;
#endif

	/* For host kernel, reload the SLBs from shadow SLB buffer. */
	slb = get_slb_shadow();
	if (!slb)
		return;

	/* Shadow entries are big-endian; clamp the count to SLB_MIN_SIZE. */
	n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);

	/* Load up the SLB entries from shadow SLB */
	for (i = 0; i < n; i++) {
		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
		unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);

		/* Low 12 bits of RB carry the SLB entry index for slbmte. */
		rb = (rb & ~0xFFFul) | i;
		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
	}
}
#endif
206 
/* Invalidate the ERAT (effective-to-real address translation cache). */
static void flush_erat(void)
{
	asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
211 
212 #define MCE_FLUSH_SLB 1
213 #define MCE_FLUSH_TLB 2
214 #define MCE_FLUSH_ERAT 3
215 
216 static int mce_flush(int what)
217 {
218 #ifdef CONFIG_PPC_STD_MMU_64
219 	if (what == MCE_FLUSH_SLB) {
220 		flush_and_reload_slb();
221 		return 1;
222 	}
223 #endif
224 	if (what == MCE_FLUSH_ERAT) {
225 		flush_erat();
226 		return 1;
227 	}
228 	if (what == MCE_FLUSH_TLB) {
229 		if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
230 			cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
231 			return 1;
232 		}
233 	}
234 
235 	return 0;
236 }
237 
/* SRR1 bit 42 set => machine check came from the load/store (d-) side. */
#define SRR1_MC_LOADSTORE(srr1)	((srr1) & PPC_BIT(42))

/*
 * One decode rule for an instruction-side machine check: the entry
 * matches when (SRR1 & srr1_mask) == srr1_value.
 */
struct mce_ierror_table {
	unsigned long srr1_mask;
	unsigned long srr1_value;
	bool nip_valid; /* nip is a valid indicator of faulting address */
	unsigned int error_type;
	unsigned int error_subtype;
	unsigned int initiator;
	unsigned int severity;
};
249 
/* POWER7 i-side decode rules; terminated by an all-zero sentinel entry. */
static const struct mce_ierror_table mce_p7_ierror_table[] = {
{ 0x00000000001c0000, 0x0000000000040000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000080000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x00000000000c0000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000100000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000140000, true,
  MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000180000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x00000000001c0000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0, 0, 0, 0, 0, 0 } };
273 
/* POWER8 i-side decode rules; terminated by an all-zero sentinel entry. */
static const struct mce_ierror_table mce_p8_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000080000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000000c0000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000100000, true,
  MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000140000, true,
  MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000180000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000001c0000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008000000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008040000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0, 0, 0, 0, 0, 0 } };
303 
/* POWER9 i-side decode rules; terminated by an all-zero sentinel entry. */
static const struct mce_ierror_table mce_p9_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000080000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000000c0000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000100000, true,
  MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000140000, true,
  MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000180000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000001c0000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_IFETCH_FOREIGN,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008000000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008040000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000080c0000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008100000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008140000, false,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_STORE,
  MCE_INITIATOR_CPU,  MCE_SEV_FATAL, }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x0000000008180000, false,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_STORE_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_FATAL, }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x00000000081c0000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0, 0, 0, 0, 0, 0 } };
348 
/*
 * One decode rule for a data-side machine check: the entry matches when
 * the dsisr_value bit(s) are set in DSISR. Unlike the i-side table this
 * is a bit test, so several entries can match one exception.
 */
struct mce_derror_table {
	unsigned long dsisr_value;
	bool dar_valid; /* dar is a valid indicator of faulting address */
	unsigned int error_type;
	unsigned int error_subtype;
	unsigned int initiator;
	unsigned int severity;
};
357 
/* POWER7 d-side decode rules; terminated by an all-zero sentinel entry. */
static const struct mce_derror_table mce_p7_derror_table[] = {
{ 0x00008000, false,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00004000, true,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000800, true,
  MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000400, true,
  MCE_ERROR_TYPE_TLB,  MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000080, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000040, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0, false, 0, 0, 0, 0 } };
381 
/* POWER8 d-side decode rules; terminated by an all-zero sentinel entry. */
static const struct mce_derror_table mce_p8_derror_table[] = {
{ 0x00008000, false,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00004000, true,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00002000, true,
  MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00001000, true,
  MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000800, true,
  MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000400, true,
  MCE_ERROR_TYPE_TLB,  MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000200, true,
  MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, /* SECONDARY ERAT */
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000080, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0, false, 0, 0, 0, 0 } };
411 
/* POWER9 d-side decode rules; terminated by an all-zero sentinel entry. */
static const struct mce_derror_table mce_p9_derror_table[] = {
{ 0x00008000, false,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00004000, true,
  MCE_ERROR_TYPE_UE,   MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00002000, true,
  MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00001000, true,
  MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000800, true,
  MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000400, true,
  MCE_ERROR_TYPE_TLB,  MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000200, false,
  MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000080, true,
  MCE_ERROR_TYPE_SLB,  MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000040, true,
  MCE_ERROR_TYPE_RA,   MCE_RA_ERROR_LOAD,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000020, false,
  MCE_ERROR_TYPE_RA,   MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000010, false,
  MCE_ERROR_TYPE_RA,   MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0x00000008, false,
  MCE_ERROR_TYPE_RA,   MCE_RA_ERROR_LOAD_STORE_FOREIGN,
  MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
{ 0, false, 0, 0, 0, 0 } };
453 
454 static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
455 					uint64_t *phys_addr)
456 {
457 	/*
458 	 * Carefully look at the NIP to determine
459 	 * the instruction to analyse. Reading the NIP
460 	 * in real-mode is tricky and can lead to recursive
461 	 * faults
462 	 */
463 	int instr;
464 	unsigned long pfn, instr_addr;
465 	struct instruction_op op;
466 	struct pt_regs tmp = *regs;
467 
468 	pfn = addr_to_pfn(regs, regs->nip);
469 	if (pfn != ULONG_MAX) {
470 		instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
471 		instr = *(unsigned int *)(instr_addr);
472 		if (!analyse_instr(&op, &tmp, instr)) {
473 			pfn = addr_to_pfn(regs, op.ea);
474 			*addr = op.ea;
475 			*phys_addr = (pfn << PAGE_SHIFT);
476 			return 0;
477 		}
478 		/*
479 		 * analyse_instr() might fail if the instruction
480 		 * is not a load/store, although this is unexpected
481 		 * for load/store errors or if we got the NIP
482 		 * wrong
483 		 */
484 	}
485 	*addr = 0;
486 	return -1;
487 }
488 
/*
 * Handle an instruction-side machine check by decoding SRR1 against
 * @table, attempting a corrective flush, and filling in *mce_err.
 *
 * Returns 1 if a flush plausibly corrected the error, 0 otherwise.
 * *addr is set to NIP when the matching entry says NIP is valid.
 */
static int mce_handle_ierror(struct pt_regs *regs,
		const struct mce_ierror_table table[],
		struct mce_error_info *mce_err, uint64_t *addr)
{
	/* For i-side errors the cause is encoded in SRR1 (saved MSR). */
	uint64_t srr1 = regs->msr;
	int handled = 0;
	int i;

	*addr = 0;

	/* Only the first matching entry is used (mask/value match). */
	for (i = 0; table[i].srr1_mask; i++) {
		if ((srr1 & table[i].srr1_mask) != table[i].srr1_value)
			continue;

		/* attempt to correct the error */
		switch (table[i].error_type) {
		case MCE_ERROR_TYPE_SLB:
			handled = mce_flush(MCE_FLUSH_SLB);
			break;
		case MCE_ERROR_TYPE_ERAT:
			handled = mce_flush(MCE_FLUSH_ERAT);
			break;
		case MCE_ERROR_TYPE_TLB:
			handled = mce_flush(MCE_FLUSH_TLB);
			break;
		}

		/* now fill in mce_error_info */
		mce_err->error_type = table[i].error_type;
		switch (table[i].error_type) {
		case MCE_ERROR_TYPE_UE:
			mce_err->u.ue_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_SLB:
			mce_err->u.slb_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_ERAT:
			mce_err->u.erat_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_TLB:
			mce_err->u.tlb_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_USER:
			mce_err->u.user_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_RA:
			mce_err->u.ra_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_LINK:
			mce_err->u.link_error_type = table[i].error_subtype;
			break;
		}
		mce_err->severity = table[i].severity;
		mce_err->initiator = table[i].initiator;
		if (table[i].nip_valid)
			*addr = regs->nip;
		return handled;
	}

	/* Nothing matched: report an unknown, synchronous CPU error. */
	mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
	mce_err->severity = MCE_SEV_ERROR_SYNC;
	mce_err->initiator = MCE_INITIATOR_CPU;

	return 0;
}
554 
/*
 * Handle a data-side machine check by testing DSISR bits against @table,
 * attempting corrective flushes for every matching condition, and
 * reporting the first match in *mce_err.
 *
 * Returns 1 if the error was plausibly corrected, 0 otherwise. *addr is
 * set to DAR when valid; for synchronous UE errors the faulting address
 * is instead recovered by decoding the instruction at NIP, which also
 * fills *phys_addr (left untouched on all other paths).
 */
static int mce_handle_derror(struct pt_regs *regs,
		const struct mce_derror_table table[],
		struct mce_error_info *mce_err, uint64_t *addr,
		uint64_t *phys_addr)
{
	uint64_t dsisr = regs->dsisr;
	int handled = 0;
	int found = 0;
	int i;

	*addr = 0;

	/* Bit test, not equality: multiple entries can match one MCE. */
	for (i = 0; table[i].dsisr_value; i++) {
		if (!(dsisr & table[i].dsisr_value))
			continue;

		/* attempt to correct the error */
		switch (table[i].error_type) {
		case MCE_ERROR_TYPE_SLB:
			if (mce_flush(MCE_FLUSH_SLB))
				handled = 1;
			break;
		case MCE_ERROR_TYPE_ERAT:
			if (mce_flush(MCE_FLUSH_ERAT))
				handled = 1;
			break;
		case MCE_ERROR_TYPE_TLB:
			if (mce_flush(MCE_FLUSH_TLB))
				handled = 1;
			break;
		}

		/*
		 * Attempt to handle multiple conditions, but only return
		 * one. Ensure uncorrectable errors are first in the table
		 * to match.
		 */
		if (found)
			continue;

		/* now fill in mce_error_info */
		mce_err->error_type = table[i].error_type;
		switch (table[i].error_type) {
		case MCE_ERROR_TYPE_UE:
			mce_err->u.ue_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_SLB:
			mce_err->u.slb_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_ERAT:
			mce_err->u.erat_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_TLB:
			mce_err->u.tlb_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_USER:
			mce_err->u.user_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_RA:
			mce_err->u.ra_error_type = table[i].error_subtype;
			break;
		case MCE_ERROR_TYPE_LINK:
			mce_err->u.link_error_type = table[i].error_subtype;
			break;
		}
		mce_err->severity = table[i].severity;
		mce_err->initiator = table[i].initiator;
		if (table[i].dar_valid)
			*addr = regs->dar;
		else if (mce_err->severity == MCE_SEV_ERROR_SYNC &&
				table[i].error_type == MCE_ERROR_TYPE_UE) {
			/*
			 * We do a maximum of 4 nested MCE calls, see
			 * kernel/exception-64s.h
			 */
			if (get_paca()->in_mce < MAX_MCE_DEPTH)
				if (!mce_find_instr_ea_and_pfn(regs, addr,
								phys_addr))
					handled = 1;
		}
		found = 1;
	}

	if (found)
		return handled;

	/* Nothing matched: report an unknown, synchronous CPU error. */
	mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
	mce_err->severity = MCE_SEV_ERROR_SYNC;
	mce_err->initiator = MCE_INITIATOR_CPU;

	return 0;
}
647 
648 static long mce_handle_ue_error(struct pt_regs *regs)
649 {
650 	long handled = 0;
651 
652 	/*
653 	 * On specific SCOM read via MMIO we may get a machine check
654 	 * exception with SRR0 pointing inside opal. If that is the
655 	 * case OPAL may have recovery address to re-read SCOM data in
656 	 * different way and hence we can recover from this MC.
657 	 */
658 
659 	if (ppc_md.mce_check_early_recovery) {
660 		if (ppc_md.mce_check_early_recovery(regs))
661 			handled = 1;
662 	}
663 	return handled;
664 }
665 
666 static long mce_handle_error(struct pt_regs *regs,
667 		const struct mce_derror_table dtable[],
668 		const struct mce_ierror_table itable[])
669 {
670 	struct mce_error_info mce_err = { 0 };
671 	uint64_t addr, phys_addr;
672 	uint64_t srr1 = regs->msr;
673 	long handled;
674 
675 	if (SRR1_MC_LOADSTORE(srr1))
676 		handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
677 				&phys_addr);
678 	else
679 		handled = mce_handle_ierror(regs, itable, &mce_err, &addr);
680 
681 	if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
682 		handled = mce_handle_ue_error(regs);
683 
684 	save_mce_event(regs, handled, &mce_err, regs->nip, addr, phys_addr);
685 
686 	return handled;
687 }
688 
/* Early real-mode machine check entry point for POWER7. */
long __machine_check_early_realmode_p7(struct pt_regs *regs)
{
	/* P7 DD1 leaves top bits of DSISR undefined */
	regs->dsisr &= 0x0000ffff;

	return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
}
696 
/* Early real-mode machine check entry point for POWER8. */
long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
	return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
}
701 
/* Early real-mode machine check entry point for POWER9. */
long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
	return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
}
706