xref: /titanic_44/usr/src/uts/sun4v/os/error.c (revision 160abee025ef30c34521b981edd40ffcaab560aa)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/machsystm.h>
30 #include <sys/sysmacros.h>
31 #include <sys/cpuvar.h>
32 #include <sys/async.h>
33 #include <sys/ontrap.h>
34 #include <sys/ddifm.h>
35 #include <sys/hypervisor_api.h>
36 #include <sys/errorq.h>
37 #include <sys/promif.h>
38 #include <sys/prom_plat.h>
39 #include <sys/x_call.h>
40 #include <sys/error.h>
41 #include <sys/fm/util.h>
42 #include <sys/ivintr.h>
43 #include <sys/archsystm.h>
44 
45 #define	MAX_CE_FLTS		10
46 #define	MAX_ASYNC_FLTS		6
47 
48 errorq_t *ue_queue;			/* queue of uncorrectable errors */
49 errorq_t *ce_queue;			/* queue of correctable errors */
50 
51 /*
52  * Used by the memory test driver.
53  * ce_verbose_memory - covers CEs in DIMMs
54  * ce_verbose_other - covers "others" (ecache, IO, etc.)
55  *
56  * If the value is 0, nothing is logged.
57  * If the value is 1, the error is logged to the log file, but not console.
58  * If the value is 2, the error is logged to the log file and console.
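 *
 * For example, adding "set ce_verbose_memory = 2" to /etc/system makes
 * correctable DIMM errors show up on the console as well as in the log.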
59  */
60 int	ce_verbose_memory = 1;
61 int	ce_verbose_other = 1;
62 
63 int	ce_show_data = 0;
64 int	ce_debug = 0;
65 int	ue_debug = 0;
66 int	reset_debug = 0;
67 
68 /*
69  * Tunables for controlling the handling of asynchronous faults (AFTs). Setting
70  * these to non-default values on a non-DEBUG kernel is NOT supported.
71  */
72 int	aft_verbose = 0;	/* log AFT messages > 1 to log only */
73 int	aft_panic = 0;		/* panic (not reboot) on fatal usermode AFLT */
74 int	aft_testfatal = 0;	/* force all AFTs to panic immediately */
75 
76 /*
77  * Used for vbsc hostshutdown (power-off button)
78  */
79 int	err_shutdown_triggered = 0;	/* only once */
80 uint64_t err_shutdown_inum = 0;	/* used to pull the trigger */
81 
82 /*
83  * Used to print NRE/RE via system variable or kmdb
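 * (e.g. "set printerrh = 1" in /etc/system, or setting printerrh from kmdb,
 * causes incoming error packets to be dumped via errh_er_print())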
84  */
85 int		printerrh = 0;		/* see /etc/system */
86 static void	errh_er_print(errh_er_t *, const char *);
87 kmutex_t	errh_print_lock;
88 
89 /*
90  * Defined in bus_func.c but initialised in error_init
91  */
92 extern kmutex_t bfd_lock;
93 
94 static uint32_t rq_overflow_count = 0;		/* counter for rq overflow */
95 
96 static void cpu_queue_one_event(errh_async_flt_t *);
97 static uint32_t count_entries_on_queue(uint64_t, uint64_t, uint32_t);
98 static void errh_page_retire(errh_async_flt_t *, uchar_t);
99 static int errh_error_protected(struct regs *, struct async_flt *, int *);
100 static void errh_rq_full(struct async_flt *);
101 static void ue_drain(void *, struct async_flt *, errorq_elem_t *);
102 static void ce_drain(void *, struct async_flt *, errorq_elem_t *);
103 static void errh_handle_attr(errh_async_flt_t *);
104 static void errh_handle_asr(errh_async_flt_t *);
105 
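/*
 * Process all pending reports on this cpu's resumable error queue, from
 * head_offset up to tail_offset.  Each report is copied to a local buffer,
 * its queue entry is released by clearing the error handle, and the event
 * is dispatched to the ce/ue error queues for logging.
 */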
106 /*ARGSUSED*/
107 void
108 process_resumable_error(struct regs *rp, uint32_t head_offset,
109     uint32_t tail_offset)
110 {
111 	struct machcpu *mcpup;
112 	struct async_flt *aflt;
113 	errh_async_flt_t errh_flt;
114 	errh_er_t *head_va;
115 
116 	mcpup = &(CPU->cpu_m);
117 
118 	while (head_offset != tail_offset) {
119 		/* kernel buffer starts right after the resumable queue */
120 		head_va = (errh_er_t *)(mcpup->cpu_rq_va + head_offset +
121 		    CPU_RQ_SIZE);
122 		/* Copy the error report to local buffer */
123 		bzero(&errh_flt, sizeof (errh_async_flt_t));
124 		bcopy((char *)head_va, &(errh_flt.errh_er),
125 		    sizeof (errh_er_t));
126 
127 		mcpup->cpu_rq_lastre = head_va;
128 		if (printerrh)
129 			errh_er_print(&errh_flt.errh_er, "RQ");
130 
131 		/* Increment the queue head */
132 		head_offset += Q_ENTRY_SIZE;
133 		/* Wrap around */
134 		head_offset &= (CPU_RQ_SIZE - 1);
135 
136 		/* set error handle to zero so it can hold a new error report */
137 		head_va->ehdl = 0;
138 
139 		switch (errh_flt.errh_er.desc) {
140 		case ERRH_DESC_UCOR_RE:
141 			/*
142 			 * Check the error attributes and handle individual
143 			 * errors if needed.
144 			 */
145 			errh_handle_attr(&errh_flt);
146 			break;
147 
148 		case ERRH_DESC_WARN_RE:
149 			/*
150 			 * Power-off requested; handle it only once.
151 			 */
152 			if (!err_shutdown_triggered) {
153 				setsoftint(err_shutdown_inum);
154 				++err_shutdown_triggered;
155 			}
156 			continue;
157 
158 		default:
159 			cmn_err(CE_WARN, "Error Descriptor 0x%llx "
160 			    "invalid in resumable error handler",
161 			    (long long) errh_flt.errh_er.desc);
162 			continue;
163 		}
164 
165 		aflt = (struct async_flt *)&(errh_flt.cmn_asyncflt);
166 		aflt->flt_id = gethrtime();
167 		aflt->flt_bus_id = getprocessorid();
168 		aflt->flt_class = CPU_FAULT;
169 		aflt->flt_prot = AFLT_PROT_NONE;
170 		aflt->flt_priv = (((errh_flt.errh_er.attr & ERRH_MODE_MASK)
171 		    >> ERRH_MODE_SHIFT) == ERRH_MODE_PRIV);
172 
173 		if (errh_flt.errh_er.attr & ERRH_ATTR_CPU)
174 			/* If it is an error on another cpu */
175 			aflt->flt_panic = 1;
176 		else
177 			aflt->flt_panic = 0;
178 
179 		/*
180 		 * Handle resumable queue full case.
181 		 */
182 		if (errh_flt.errh_er.attr & ERRH_ATTR_RQF) {
183 			(void) errh_rq_full(aflt);
184 		}
185 
186 		/*
187 		 * Queue the error on the ce or ue queue depending on flt_panic.
188 		 * Even if flt_panic is set, we keep processing the remaining
189 		 * elements on the resumable queue until the panic starts.
190 		 */
191 		(void) cpu_queue_one_event(&errh_flt);
192 
193 		/*
194 		 * Panic here if aflt->flt_panic has been set.
195 		 * Enqueued errors will be logged as part of the panic flow.
196 		 */
197 		if (aflt->flt_panic) {
198 			fm_panic("Unrecoverable error on another CPU");
199 		}
200 	}
201 }
202 
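/*
 * Process all pending reports on this cpu's nonresumable error queue.
 * In addition to queueing each event for logging, this may redirect the
 * saved registers to an on_trap()/t_lofault trampoline, query the bus
 * nexus for PIO errors, retire faulty memory pages, or panic the system.
 */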
203 void
204 process_nonresumable_error(struct regs *rp, uint64_t flags,
205     uint32_t head_offset, uint32_t tail_offset)
206 {
207 	struct machcpu *mcpup;
208 	struct async_flt *aflt;
209 	errh_async_flt_t errh_flt;
210 	errh_er_t *head_va;
211 	int trampolined = 0;
212 	int expected = DDI_FM_ERR_UNEXPECTED;
213 	uint64_t exec_mode;
214 	uint8_t u_spill_fill;
215 
216 	mcpup = &(CPU->cpu_m);
217 
218 	while (head_offset != tail_offset) {
219 		/* kernel buffer starts right after the nonresumable queue */
220 		head_va = (errh_er_t *)(mcpup->cpu_nrq_va + head_offset +
221 		    CPU_NRQ_SIZE);
222 
223 		/* Copy the error report to local buffer */
224 		bzero(&errh_flt, sizeof (errh_async_flt_t));
225 
226 		bcopy((char *)head_va, &(errh_flt.errh_er),
227 		    sizeof (errh_er_t));
228 
229 		mcpup->cpu_nrq_lastnre = head_va;
230 		if (printerrh)
231 			errh_er_print(&errh_flt.errh_er, "NRQ");
232 
233 		/* Increment the queue head */
234 		head_offset += Q_ENTRY_SIZE;
235 		/* Wrap around */
236 		head_offset &= (CPU_NRQ_SIZE - 1);
237 
238 		/* set error handle to zero so it can hold a new error report */
239 		head_va->ehdl = 0;
240 
241 		aflt = (struct async_flt *)&(errh_flt.cmn_asyncflt);
242 
243 		trampolined = 0;
244 
245 		if (errh_flt.errh_er.attr & ERRH_ATTR_PIO)
246 			aflt->flt_class = BUS_FAULT;
247 		else
248 			aflt->flt_class = CPU_FAULT;
249 
250 		aflt->flt_id = gethrtime();
251 		aflt->flt_bus_id = getprocessorid();
252 		aflt->flt_pc = (caddr_t)rp->r_pc;
253 		exec_mode = (errh_flt.errh_er.attr & ERRH_MODE_MASK)
254 		    >> ERRH_MODE_SHIFT;
255 		aflt->flt_priv = (exec_mode == ERRH_MODE_PRIV ||
256 		    exec_mode == ERRH_MODE_UNKNOWN);
257 		aflt->flt_prot = AFLT_PROT_NONE;
258 		aflt->flt_tl = (uchar_t)(flags & ERRH_TL_MASK);
259 		aflt->flt_panic = ((aflt->flt_tl != 0) ||
260 		    (aft_testfatal != 0));
261 
262 		/*
263 		 * For the first error packet on the queue, check if it
264 		 * happened in a user fill/spill trap.
265 		 */
266 		if (flags & ERRH_U_SPILL_FILL) {
267 			u_spill_fill = 1;
268 			/* clear the user fill/spill flag in flags */
269 			flags = (uint64_t)aflt->flt_tl;
270 		} else
271 			u_spill_fill = 0;
272 
273 		switch (errh_flt.errh_er.desc) {
274 		case ERRH_DESC_PR_NRE:
275 			if (u_spill_fill) {
276 				aflt->flt_panic = 0;
277 				break;
278 			}
279 			/*
280 			 * Fall through: a precise fault also needs to be
281 			 * checked to see if it was protected.
282 			 */
283 			/*FALLTHRU*/
284 
285 		case ERRH_DESC_DEF_NRE:
286 			/*
287 			 * If the trap occurred in privileged mode at TL=0,
288 			 * we need to check to see if we were executing
289 			 * in kernel under on_trap() or t_lofault
290 			 * protection. If so, and if it was a PIO or MEM
291 			 * error, then modify the saved registers so that
292 			 * we return from the trap to the appropriate
293 			 * trampoline routine.
294 			 */
295 			if (aflt->flt_priv == 1 && aflt->flt_tl == 0 &&
296 			    ((errh_flt.errh_er.attr & ERRH_ATTR_PIO) ||
297 			    (errh_flt.errh_er.attr & ERRH_ATTR_MEM))) {
298 				trampolined =
299 				    errh_error_protected(rp, aflt, &expected);
300 			}
301 
302 			if (!aflt->flt_priv || aflt->flt_prot ==
303 			    AFLT_PROT_COPY) {
304 				aflt->flt_panic |= aft_panic;
305 			} else if (!trampolined &&
306 			    (aflt->flt_class != BUS_FAULT)) {
307 				aflt->flt_panic = 1;
308 			}
309 
310 			/*
311 			 * Check the error attributes and handle individual
312 			 * errors if needed.
313 			 */
314 			errh_handle_attr(&errh_flt);
315 
316 			/*
317 			 * If PIO error, we need to query the bus nexus
318 			 * for fatal errors.
319 			 */
320 			if (aflt->flt_class == BUS_FAULT) {
321 				aflt->flt_addr = errh_flt.errh_er.ra;
322 				errh_cpu_run_bus_error_handlers(aflt,
323 				    expected);
324 			}
325 
326 			break;
327 
328 		case ERRH_DESC_USER_DCORE:
329 			/*
330 			 * User generated panic. Call panic directly
331 			 * since there are no FMA e-reports to
332 			 * display.
333 			 */
334 
335 			panic("Panic - Generated at user request");
336 
337 			break;
338 
339 		default:
340 			cmn_err(CE_WARN, "Panic - Error Descriptor 0x%llx "
341 			    "invalid in non-resumable error handler",
342 			    (long long) errh_flt.errh_er.desc);
343 			aflt->flt_panic = 1;
344 			break;
345 		}
346 
347 		/*
348 		 * Queue the error report for further processing. If
349 		 * flt_panic is set, we still process the other errors
350 		 * in the queue until the panic routine stops the
351 		 * kernel.
352 		 */
353 		(void) cpu_queue_one_event(&errh_flt);
354 
355 		/*
356 		 * Panic here if aflt->flt_panic has been set.
357 		 * Enqueued errors will be logged as part of the panic flow.
358 		 */
359 		if (aflt->flt_panic) {
360 			fm_panic("Unrecoverable hardware error");
361 		}
362 
363 		/*
364 		 * Call page_retire() to handle memory errors.
365 		 */
366 		if (errh_flt.errh_er.attr & ERRH_ATTR_MEM)
367 			errh_page_retire(&errh_flt, PR_UE);
368 
369 		/*
370 		 * If we queued an error and it was in user mode, or was
371 		 * protected by t_lofault, or u_spill_fill is set, we set
372 		 * the AST flag so the queue will be drained before
373 		 * returning to user mode.
374 		 */
375 		if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY ||
376 		    u_spill_fill) {
377 			int pcb_flag = 0;
378 
379 			if (aflt->flt_class == CPU_FAULT)
380 				pcb_flag |= ASYNC_HWERR;
381 			else if (aflt->flt_class == BUS_FAULT)
382 				pcb_flag |= ASYNC_BERR;
383 
384 			ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag;
385 			aston(curthread);
386 		}
387 	}
388 }
389 
390 /*
391  * For PIO errors, this routine calls the nexus driver's error
392  * callback routines. If the callback routine returns fatal, and
393  * we are in kernel or unknown mode without any error protection,
394  * we need to turn on the panic flag.
395  */
396 void
397 errh_cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
398 {
399 	int status;
400 	ddi_fm_error_t de;
401 
402 	bzero(&de, sizeof (ddi_fm_error_t));
403 
404 	de.fme_version = DDI_FME_VERSION;
405 	de.fme_ena = fm_ena_generate(aflt->flt_id, FM_ENA_FMT1);
406 	de.fme_flag = expected;
407 	de.fme_bus_specific = (void *)aflt->flt_addr;
408 	status = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &de);
409 
410 	/*
411 	 * If the error is protected, we will jump to the proper routine
412 	 * to handle it; if it occurred at user level, we just kill the
413 	 * user process; if the driver thinks the error is not fatal,
414 	 * we can drive on. If none of the above is true,
415 	 * we panic.
416 	 */
417 	if ((aflt->flt_prot == AFLT_PROT_NONE) && (aflt->flt_priv == 1) &&
418 	    (status == DDI_FM_FATAL))
419 		aflt->flt_panic = 1;
420 }
421 
422 /*
423  * This routine checks to see if we are under any error protection when
424  * the error happens. If we are under error protection, we unwind to
425  * the protection and indicate the fault.
426  */
427 static int
428 errh_error_protected(struct regs *rp, struct async_flt *aflt, int *expected)
429 {
430 	int trampolined = 0;
431 	ddi_acc_hdl_t *hp;
432 
433 	if (curthread->t_ontrap != NULL) {
434 		on_trap_data_t *otp = curthread->t_ontrap;
435 
436 		if (otp->ot_prot & OT_DATA_EC) {
437 			aflt->flt_prot = AFLT_PROT_EC;
438 			otp->ot_trap |= OT_DATA_EC;
439 			rp->r_pc = otp->ot_trampoline;
440 			rp->r_npc = rp->r_pc + 4;
441 			trampolined = 1;
442 		}
443 
444 		if (otp->ot_prot & OT_DATA_ACCESS) {
445 			aflt->flt_prot = AFLT_PROT_ACCESS;
446 			otp->ot_trap |= OT_DATA_ACCESS;
447 			rp->r_pc = otp->ot_trampoline;
448 			rp->r_npc = rp->r_pc + 4;
449 			trampolined = 1;
450 			/*
451 			 * For peeks and cautious gets,
452 			 * errors are expected.
453 			 */
454 			hp = (ddi_acc_hdl_t *)otp->ot_handle;
455 			if (!hp)
456 				*expected = DDI_FM_ERR_PEEK;
457 			else if (hp->ah_acc.devacc_attr_access ==
458 			    DDI_CAUTIOUS_ACC)
459 				*expected = DDI_FM_ERR_EXPECTED;
460 		}
461 	} else if (curthread->t_lofault) {
462 		aflt->flt_prot = AFLT_PROT_COPY;
463 		rp->r_g1 = EFAULT;
464 		rp->r_pc = curthread->t_lofault;
465 		rp->r_npc = rp->r_pc + 4;
466 		trampolined = 1;
467 	}
468 
469 	return (trampolined);
470 }
471 
472 /*
473  * Queue one event on the ue or ce errorq, depending on flt_panic.
474  */
475 static void
476 cpu_queue_one_event(errh_async_flt_t *errh_fltp)
477 {
478 	struct async_flt *aflt = (struct async_flt *)errh_fltp;
479 	errorq_t *eqp;
480 
481 	if (aflt->flt_panic)
482 		eqp = ue_queue;
483 	else
484 		eqp = ce_queue;
485 
486 	errorq_dispatch(eqp, errh_fltp, sizeof (errh_async_flt_t),
487 	    aflt->flt_panic);
488 }
489 
490 /*
491  * The cpu_async_log_err() function is called by the ce/ue_drain() function to
492  * handle logging for CPU events that are dequeued.  As such, it can be invoked
493  * from softint context, from AST processing in the trap() flow, or from the
494  * panic flow.  We decode the CPU-specific data, and log appropriate messages.
495  */
496 void
497 cpu_async_log_err(void *flt)
498 {
499 	errh_async_flt_t *errh_fltp = (errh_async_flt_t *)flt;
500 	errh_er_t *errh_erp = (errh_er_t *)&errh_fltp->errh_er;
501 
502 	switch (errh_erp->desc) {
503 	case ERRH_DESC_UCOR_RE:
504 		if (errh_erp->attr & ERRH_ATTR_MEM) {
505 			/*
506 			 * Turn on the PR_UE flag. The page will be
507 			 * scrubbed when it is freed.
508 			 */
509 			errh_page_retire(errh_fltp, PR_UE);
510 		}
511 
512 		break;
513 
514 	case ERRH_DESC_PR_NRE:
515 	case ERRH_DESC_DEF_NRE:
516 		if (errh_erp->attr & ERRH_ATTR_MEM) {
517 			/*
518 			 * For a non-resumable memory error, retire
519 			 * the page here.
520 			 */
521 			errh_page_retire(errh_fltp, PR_UE);
522 
523 			/*
524 			 * If we are going to panic, scrub the page first
525 			 */
526 			if (errh_fltp->cmn_asyncflt.flt_panic)
527 				mem_scrub(errh_fltp->errh_er.ra,
528 				    errh_fltp->errh_er.sz);
529 		}
530 		break;
531 
532 	default:
533 		break;
534 	}
535 }
536 
537 /*
538  * Called from ce_drain().
539  */
540 void
541 cpu_ce_log_err(struct async_flt *aflt)
542 {
543 	switch (aflt->flt_class) {
544 	case CPU_FAULT:
545 		cpu_async_log_err(aflt);
546 		break;
547 
548 	case BUS_FAULT:
549 		cpu_async_log_err(aflt);
550 		break;
551 
552 	default:
553 		break;
554 	}
555 }
556 
557 /*
558  * Called from ue_drain().
559  */
560 void
561 cpu_ue_log_err(struct async_flt *aflt)
562 {
563 	switch (aflt->flt_class) {
564 	case CPU_FAULT:
565 		cpu_async_log_err(aflt);
566 		break;
567 
568 	case BUS_FAULT:
569 		cpu_async_log_err(aflt);
570 		break;
571 
572 	default:
573 		break;
574 	}
575 }
576 
577 /*
578  * Turn on the retire flag for the pages in the error memory region.
579  */
580 static void
581 errh_page_retire(errh_async_flt_t *errh_fltp, uchar_t flag)
582 {
583 	uint64_t flt_real_addr_start = errh_fltp->errh_er.ra;
584 	uint64_t flt_real_addr_end = flt_real_addr_start +
585 	    errh_fltp->errh_er.sz - 1;
586 	int64_t current_addr;
587 
588 	if (errh_fltp->errh_er.sz == 0)
589 		return;
590 
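	/* Walk the reported region a page at a time and retire each page. */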
591 	for (current_addr = flt_real_addr_start;
592 	    current_addr < flt_real_addr_end; current_addr += MMU_PAGESIZE) {
593 		(void) page_retire(current_addr, flag);
594 	}
595 }
596 
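/*
 * Ask the hypervisor to scrub the physical memory region [paddr, paddr + len).
 * hv_mem_scrub() may scrub less than requested, so keep calling it until the
 * whole region is done or an error is returned.
 */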
597 void
598 mem_scrub(uint64_t paddr, uint64_t len)
599 {
600 	uint64_t pa, length, scrubbed_len;
601 
602 	pa = paddr;
603 	length = len;
604 	scrubbed_len = 0;
605 
606 	while (length > 0) {
607 		if (hv_mem_scrub(pa, length, &scrubbed_len) != H_EOK)
608 			break;
609 
610 		pa += scrubbed_len;
611 		length -= scrubbed_len;
612 	}
613 }
614 
615 /*
616  * Call hypervisor to flush the memory region.
617  * va and len are rounded to MMU_PAGESIZE boundaries internally.
618  * Returns the total number of bytes flushed.
619  */
620 uint64_t
621 mem_sync(caddr_t orig_va, size_t orig_len)
622 {
623 	uint64_t pa, length, flushed;
624 	uint64_t chunk_len = MMU_PAGESIZE;
625 	uint64_t total_flushed = 0;
626 	uint64_t va, len;
627 
628 	if (orig_len == 0)
629 		return (total_flushed);
630 
631 	/* align va */
632 	va = P2ALIGN_TYPED(orig_va, MMU_PAGESIZE, uint64_t);
633 	/* round up len to MMU_PAGESIZE aligned */
634 	len = P2ROUNDUP_TYPED(orig_va + orig_len, MMU_PAGESIZE, uint64_t) - va;
635 
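	/*
	 * The region need not be physically contiguous, so translate and
	 * sync one MMU_PAGESIZE chunk at a time.
	 */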
636 	while (len > 0) {
637 		pa = va_to_pa((caddr_t)va);
638 		if (pa == (uint64_t)-1)
639 			return (total_flushed);
640 
641 		length = chunk_len;
642 		flushed = 0;
643 
644 		while (length > 0) {
645 			if (hv_mem_sync(pa, length, &flushed) != H_EOK)
646 				return (total_flushed);
647 
648 			pa += flushed;
649 			length -= flushed;
650 			total_flushed += flushed;
651 		}
652 
653 		va += chunk_len;
654 		len -= chunk_len;
655 	}
656 
657 	return (total_flushed);
658 }
659 
660 /*
661  * If the resumable queue is full, we need to check whether any cpu is
662  * in the error state. If not, we drive on; if so, we need to panic. The
663  * hypervisor call hv_cpu_state() is used to check the cpu state. We
664  * also reset %tick_cmpr in case the tick-compare value was lost.
665  */
666 static void
667 errh_rq_full(struct async_flt *afltp)
668 {
669 	processorid_t who;
670 	uint64_t cpu_state;
671 	uint64_t retval;
672 	uint64_t current_tick;
673 
674 	current_tick = (uint64_t)gettick();
675 	tickcmpr_set(current_tick);
676 
677 	for (who = 0; who < NCPU; who++)
678 		if (CPU_IN_SET(cpu_ready_set, who)) {
679 			retval = hv_cpu_state(who, &cpu_state);
680 			if (retval != H_EOK || cpu_state == CPU_STATE_ERROR) {
681 				afltp->flt_panic = 1;
682 				break;
683 			}
684 		}
685 }
686 
687 /*
688  * Return processor specific async error structure
689  * size used.
690  */
691 int
692 cpu_aflt_size(void)
693 {
694 	return (sizeof (errh_async_flt_t));
695 }
696 
697 #define	SZ_TO_ETRS_SHIFT	6
698 
699 /*
700  * Called when the resumable queue overflows; we just count the occurrences.
701  */
702 /*ARGSUSED*/
703 void
704 rq_overflow(struct regs *rp, uint64_t head_offset,
705     uint64_t tail_offset)
706 {
707 	rq_overflow_count++;
708 }
709 
710 /*
711  * Handler to process a fatal error.  This routine can be called from a
712  * softint, called from trap()'s AST handling, or called from the panic flow.
713  */
714 /*ARGSUSED*/
715 static void
716 ue_drain(void *ignored, struct async_flt *aflt, errorq_elem_t *eqep)
717 {
718 	cpu_ue_log_err(aflt);
719 }
720 
721 /*
722  * Handler to process a correctable error.  This routine can be called from a
723  * softint.  We just call the CPU module's logging routine.
724  */
725 /*ARGSUSED*/
726 static void
727 ce_drain(void *ignored, struct async_flt *aflt, errorq_elem_t *eqep)
728 {
729 	cpu_ce_log_err(aflt);
730 }
731 
732 /*
733  * Handler to process vbsc hostshutdown (power-off button).
734  */
735 static int
736 err_shutdown_softintr()
737 err_shutdown_softintr(void)
738 	cmn_err(CE_WARN, "Power-off requested, system will now shutdown.");
739 	do_shutdown();
740 
741 	/*
742 	 * just in case do_shutdown() fails
743 	 */
744 	(void) timeout((void(*)(void *))power_down, NULL, 100 * hz);
745 	return (DDI_INTR_CLAIMED);
746 }
747 
748 /*
749  * Allocate error queue sizes based on max_ncpus.  max_ncpus is set just
750  * after ncpunode has been determined.  ncpus is set in start_other_cpus
751  * which is called after error_init() but may change dynamically.
752  */
753 void
754 error_init(void)
755 {
756 	char tmp_name[MAXSYSNAME];
757 	pnode_t node;
758 	size_t size = cpu_aflt_size();
759 
760 	/*
761 	 * Initialize the correctable and uncorrectable error queues.
762 	 */
763 	ue_queue = errorq_create("ue_queue", (errorq_func_t)ue_drain, NULL,
764 	    MAX_ASYNC_FLTS * (max_ncpus + 1), size, PIL_2, ERRORQ_VITAL);
765 
766 	ce_queue = errorq_create("ce_queue", (errorq_func_t)ce_drain, NULL,
767 	    MAX_CE_FLTS * (max_ncpus + 1), size, PIL_1, 0);
768 
769 	if (ue_queue == NULL || ce_queue == NULL)
770 		panic("failed to create required system error queue");
771 
772 	/*
773 	 * Setup interrupt handler for power-off button.
774 	 */
775 	err_shutdown_inum = add_softintr(PIL_9,
776 	    (softintrfunc)err_shutdown_softintr, NULL, SOFTINT_ST);
777 
778 	/*
779 	 * Initialize the busfunc list mutex.  This must be a PIL_15 spin lock
780 	 * because we will need to acquire it from cpu_async_error().
781 	 */
782 	mutex_init(&bfd_lock, NULL, MUTEX_SPIN, (void *)PIL_15);
783 
784 	/* Only allow one cpu at a time to dump errh errors. */
785 	mutex_init(&errh_print_lock, NULL, MUTEX_SPIN, (void *)PIL_15);
786 
787 	node = prom_rootnode();
788 	if ((node == OBP_NONODE) || (node == OBP_BADNODE)) {
789 		cmn_err(CE_CONT, "error_init: node 0x%x\n", (uint_t)node);
790 		return;
791 	}
792 
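	/* If the PROM recorded a "reset-reason" property, log it at boot. */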
793 	if (((size = prom_getproplen(node, "reset-reason")) != -1) &&
794 	    (size <= MAXSYSNAME) &&
795 	    (prom_getprop(node, "reset-reason", tmp_name) != -1)) {
796 		if (reset_debug) {
797 			cmn_err(CE_CONT, "System booting after %s\n", tmp_name);
798 		} else if (strncmp(tmp_name, "FATAL", 5) == 0) {
799 			cmn_err(CE_CONT,
800 			    "System booting after fatal error %s\n", tmp_name);
801 		}
802 	}
803 }
804 
805 /*
806  * Nonresumable queue is full, panic here
807  */
808 /*ARGSUSED*/
809 void
810 nrq_overflow(struct regs *rp)
811 {
812 	fm_panic("Nonresumable queue full");
813 }
814 
815 /*
816  * This is the place for special error handling for individual errors.
817  */
818 static void
819 errh_handle_attr(errh_async_flt_t *errh_fltp)
820 {
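	/* Only ASR errors currently require extra handling here. */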
821 	switch (errh_fltp->errh_er.attr & ~ERRH_MODE_MASK) {
822 	case ERRH_ATTR_CPU:
823 	case ERRH_ATTR_MEM:
824 	case ERRH_ATTR_PIO:
825 	case ERRH_ATTR_IRF:
826 	case ERRH_ATTR_FRF:
827 	case ERRH_ATTR_SHUT:
828 		break;
829 
830 	case ERRH_ATTR_ASR:
831 		errh_handle_asr(errh_fltp);
832 		break;
833 
834 	case ERRH_ATTR_ASI:
835 	case ERRH_ATTR_PREG:
836 	case ERRH_ATTR_RQF:
837 		break;
838 
839 	default:
840 		break;
841 	}
842 }
843 
844 /*
845  * Handle ASR bit set in ATTR
846  */
847 static void
848 errh_handle_asr(errh_async_flt_t *errh_fltp)
849 {
850 	uint64_t current_tick;
851 
852 	switch (errh_fltp->errh_er.reg) {
853 	case ASR_REG_VALID | ASR_REG_TICK:
854 		/*
855 		 * A Tick Compare Register error only happens when the
856 		 * register is being read or compared with the %tick
857 		 * register. Since we lost the contents of the register,
858 		 * we set %tick_cmpr in the future. An interrupt will be
859 		 * generated when %tick matches the value field of %tick_cmpr.
860 		 */
861 		current_tick = (uint64_t)gettick();
862 		tickcmpr_set(current_tick);
863 		/* Do not panic */
864 		errh_fltp->cmn_asyncflt.flt_panic = 0;
865 		break;
866 
867 	default:
868 		break;
869 	}
870 }
871 
872 /*
873  * Dump the error packet
874  */
875 /*ARGSUSED*/
876 static void
877 errh_er_print(errh_er_t *errh_erp, const char *queue)
878 {
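	/*
	 * View the error report as 64-bit words and print each one as
	 * four 16-bit values.
	 */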
879 	typedef union {
880 		uint64_t w;
881 		uint16_t s[4];
882 	} errhp_t;
883 	errhp_t *p = (errhp_t *)errh_erp;
884 	int i;
885 
886 	mutex_enter(&errh_print_lock);
887 	switch (errh_erp->desc) {
888 	case ERRH_DESC_UCOR_RE:
889 		cmn_err(CE_CONT, "\nResumable Uncorrectable Error ");
890 		break;
891 	case ERRH_DESC_PR_NRE:
892 		cmn_err(CE_CONT, "\nNonresumable Precise Error ");
893 		break;
894 	case ERRH_DESC_DEF_NRE:
895 		cmn_err(CE_CONT, "\nNonresumable Deferred Error ");
896 		break;
897 	default:
898 		cmn_err(CE_CONT, "\nError packet ");
899 		break;
900 	}
901 	cmn_err(CE_CONT, "received on %s\n", queue);
902 
903 	/*
904 	 * Print Q_ENTRY_SIZE bytes of epacket with 8 bytes per line
905 	 */
906 	for (i = Q_ENTRY_SIZE; i > 0; i -= 8, ++p) {
907 		cmn_err(CE_CONT, "%016lx: %04x %04x %04x %04x\n", (uint64_t)p,
908 		    p->s[0], p->s[1], p->s[2], p->s[3]);
909 	}
910 	mutex_exit(&errh_print_lock);
911 }
912