xref: /linux/arch/powerpc/kernel/mce.c (revision 0a94608f0f7de9b1135ffea3546afe68eafef57f)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>
#include <linux/of.h>

#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>

#include "setup.h"

static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

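/*
 * Notifiers on this chain are invoked from machine_process_ue_event(), in
 * process context, for each queued UE event before it is acted upon.
 */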
int mce_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

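/*
 * Raise irq work and note in the paca that MCE irq work is pending, so
 * that mce_run_irq_context_handlers() knows there is something to do.
 */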
void mce_irq_work_queue(void)
{
	/* Raise decrementer interrupt */
	arch_irq_work_raise();
	set_mce_pending_irq_work();
}

/*
 * Decode and save high-level MCE information into the per-CPU buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = local_paca->mce_info->mce_nest_count++;
	struct machine_check_event *mce;

	mce = &local_paca->mce_info->mce_event[index];
	/*
	 * Return if we don't have enough space to log the mce event.
	 * mce_nest_count may go beyond MAX_MC_EVT, but that's OK:
	 * the check below stops a buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);
	if (mce->error_type == MCE_ERROR_TYPE_UE)
		mce->u.ue_error.ignore_event = mce_err->ignore_event;

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			machine_check_ue_event(mce);
		}
	}
}

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 = do not release the mce event. Caller will invoke
 *		    release_mce_event() once the event has been consumed.
 *		1 = release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform-specific machine check
 * handler routines and in KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = local_paca->mce_info->mce_nest_count - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = &local_paca->mce_info->mce_event[index];
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		local_paca->mce_info->mce_nest_count--;

	return ret;
}

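/* Drop the most recent event without copying it out. */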
void release_mce_event(void)
{
	get_mce_event(NULL, true);
}

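/* Schedule the work that processes queued UE events in process context. */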
static void machine_check_ue_work(void)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up a UE event so it can be processed later from the work queue.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = local_paca->mce_info->mce_ue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_ue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
	       evt, sizeof(*evt));

	/* Queue work to process this event later. */
	mce_irq_work_queue();
}

/*
 * Queue up the MCE event so it can be logged and reported later,
 * outside the machine check handler.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = local_paca->mce_info->mce_queue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_queue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_event_queue[index],
	       &evt, sizeof(evt));

	mce_irq_work_queue();
}

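/*
 * If a UE hit kernel code covered by an exception table entry (e.g. a
 * user access helper), redirect execution to the fixup and mark the
 * event to be ignored, since the fixup path does its own error
 * handling and reporting.
 */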
void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs_set_return_ip(regs, extable_fixup(entry));
	}
}

/*
 * Process pending UE events from the MCE UE event queue. This is the
 * work function scheduled via machine_check_ue_work().
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (local_paca->mce_info->mce_ue_count > 0) {
		index = local_paca->mce_info->mce_ue_count - 1;
		evt = &local_paca->mce_info->mce_ue_event_queue[index];
		blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but oh well.
		 *
		 * Don't report this machine check because the caller has
		 * asked us to ignore the event; it has a fixup handler
		 * which will do the appropriate error handling and
		 * reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				local_paca->mce_info->mce_ue_count--;
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from where the uncorrectable error (UE) was generated\n");
		}
#endif
		local_paca->mce_info->mce_ue_count--;
	}
}

/*
 * Process pending MCE events from the MCE event queue. This is called
 * from irq context via mce_run_irq_context_handlers().
 */
static void machine_check_process_queued_event(void)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (local_paca->mce_info->mce_queue_count > 0) {
		index = local_paca->mce_info->mce_queue_count - 1;
		evt = &local_paca->mce_info->mce_event_queue[index];

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			local_paca->mce_info->mce_queue_count--;
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		local_paca->mce_info->mce_queue_count--;
	}
}

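/* Set/clear the per-CPU flag that says MCE irq work is outstanding. */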
void set_mce_pending_irq_work(void)
{
	local_paca->mce_pending_irq_work = 1;
}

void clear_mce_pending_irq_work(void)
{
	local_paca->mce_pending_irq_work = 0;
}

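/*
 * Run the irq-context parts of MCE handling: platform error logging,
 * printing queued events, and scheduling the UE work function.
 */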
void mce_run_irq_context_handlers(void)
{
	if (unlikely(local_paca->mce_pending_irq_work)) {
		if (ppc_md.machine_check_log_err)
			ppc_md.machine_check_log_err();
		machine_check_process_queued_event();
		machine_check_ue_work();
		clear_mce_pending_irq_work();
	}
}

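/*
 * Print a human-readable summary of an MCE event to the console, at a
 * log level matching the event's severity.
 */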
void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
		"scv invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign/control memory)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign/control memory)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign/control memory)",
		"Load/Store (foreign/control memory)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
		level, evt->cpu, sevstr, in_guest ? "Guest" : "",
		err_type, subtype, dar_str,
		evt->disposition == MCE_DISPOSITION_RECOVERED ?
		"Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
			level, evt->cpu, current->pid, current->comm,
			in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
			level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_64S_HASH_MMU
	/* Display faulty slb contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
	long handled = 0;

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if platform is capable of handling machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	return handled;
}

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

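/*
 * Work out what the HMER debug trigger is used for on this CPU, first
 * from the device tree and then, failing that, from the PVR.
 */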
static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not an HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * To avoid problems with soft-disable, we only do the
		 * emulation if we are coming from host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

/*
 * Return values:
 *  0 means no further handling is required
 *  1 means further handling is required
 */
DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
	int ret;

	local_paca->hmi_irqs++;

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}

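/*
 * Allocate the per-CPU MCE event buffers. Keep them below the
 * bolted-region/RMA limit so they remain accessible to the real-mode
 * machine check path that fills them in.
 */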
void __init mce_init(void)
{
	struct mce_info *mce_info;
	u64 limit;
	int i;

	limit = min(ppc64_bolted_size(), ppc64_rma_size);
	for_each_possible_cpu(i) {
		mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
						  __alignof__(*mce_info),
						  MEMBLOCK_LOW_LIMIT,
						  limit, cpu_to_node(i));
		if (!mce_info)
			goto err;
		paca_ptrs[i]->mce_info = mce_info;
	}
	return;
err:
	panic("Failed to allocate memory for MCE event data\n");
}
768