/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/async.h>
#include <sys/ontrap.h>
#include <sys/ddifm.h>
#include <sys/hypervisor_api.h>
#include <sys/errorq.h>
#include <sys/promif.h>
#include <sys/prom_plat.h>
#include <sys/x_call.h>
#include <sys/error.h>
#include <sys/fm/util.h>
#include <sys/ivintr.h>
#include <sys/archsystm.h>

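/*
 * Per-CPU sizing factors for the error queues: error_init() multiplies
 * these by (max_ncpus + 1) to size the correctable (ce_queue) and
 * uncorrectable/fatal (ue_queue) error queues.
 */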
#define	MAX_CE_FLTS		10
#define	MAX_ASYNC_FLTS		6

errorq_t *ue_queue;			/* queue of uncorrectable errors */
errorq_t *ce_queue;			/* queue of correctable errors */

/*
 * Used by the memory test driver.
 * ce_verbose_memory - covers CEs in DIMMs
 * ce_verbose_other - covers "others" (ecache, IO, etc.)
 *
 * If the value is 0, nothing is logged.
 * If the value is 1, the error is logged to the log file, but not the console.
 * If the value is 2, the error is logged to both the log file and the console.
 */
int	ce_verbose_memory = 1;
int	ce_verbose_other = 1;

int	ce_show_data = 0;
int	ce_debug = 0;
int	ue_debug = 0;
int	reset_debug = 0;

/*
 * Tunables for controlling the handling of asynchronous faults (AFTs). Setting
 * these to non-default values on a non-DEBUG kernel is NOT supported.
 */
int	aft_verbose = 0;	/* log AFT messages > 1 to log only */
int	aft_panic = 0;		/* panic (not reboot) on fatal usermode AFLT */
int	aft_testfatal = 0;	/* force all AFTs to panic immediately */

/*
 * Used for vbsc hostshutdown (power-off button)
 */
int	err_shutdown_triggered = 0;	/* only once */
uint64_t err_shutdown_inum = 0;	/* used to pull the trigger */

/*
 * Used to print NRE/RE via system variable or kmdb
 */
int		printerrh = 0;		/* see /etc/system */
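/*
 * For example (illustrative only, not a shipped default), error report
 * dumping can be enabled by adding the following line to /etc/system
 * and rebooting:
 *	set printerrh = 1
 * or by patching the variable at run time from kmdb:
 *	printerrh/W 1
 */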
static void	errh_er_print(errh_er_t *, const char *);
kmutex_t	errh_print_lock;

/*
 * Defined in bus_func.c but initialized in error_init().
 */
extern kmutex_t bfd_lock;

static uint32_t rq_overflow_count = 0;		/* counter for rq overflow */

static void cpu_queue_one_event(errh_async_flt_t *);
static uint32_t count_entries_on_queue(uint64_t, uint64_t, uint32_t);
static void errh_page_retire(errh_async_flt_t *, uchar_t);
static int errh_error_protected(struct regs *, struct async_flt *, int *);
static void errh_rq_full(struct async_flt *);
static void ue_drain(void *, struct async_flt *, errorq_elem_t *);
static void ce_drain(void *, struct async_flt *, errorq_elem_t *);
static void errh_handle_attr(errh_async_flt_t *);
static void errh_handle_asr(errh_async_flt_t *);

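/*
 * Drain this CPU's resumable error queue.  Each pending error report between
 * head_offset and tail_offset is copied into a local buffer and dispatched to
 * the ce/ue error queues as appropriate; the shadow entry's error handle is
 * then cleared so it can accept a new report.  Errors flagged as fatal panic
 * the system once they have been queued.
 */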
/*ARGSUSED*/
void
process_resumable_error(struct regs *rp, uint32_t head_offset,
    uint32_t tail_offset)
{
	struct machcpu *mcpup;
	struct async_flt *aflt;
	errh_async_flt_t errh_flt;
	errh_er_t *head_va;

	mcpup = &(CPU->cpu_m);

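	/* Process every pending report between the queue head and tail. */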
	while (head_offset != tail_offset) {
		/* kernel buffer starts right after the resumable queue */
		head_va = (errh_er_t *)(mcpup->cpu_rq_va + head_offset +
		    CPU_RQ_SIZE);
		/* Copy the error report to the local buffer */
		bzero(&errh_flt, sizeof (errh_async_flt_t));
		bcopy((char *)head_va, &(errh_flt.errh_er),
		    sizeof (errh_er_t));

		mcpup->cpu_rq_lastre = head_va;
		if (printerrh)
			errh_er_print(&errh_flt.errh_er, "RQ");

		/* Increment the queue head */
		head_offset += Q_ENTRY_SIZE;
		/* Wrap around */
		head_offset &= (CPU_RQ_SIZE - 1);

		/* Clear the error handle so the entry can hold a new report */
		head_va->ehdl = 0;

		switch (errh_flt.errh_er.desc) {
		case ERRH_DESC_UCOR_RE:
			/*
			 * Check the error attribute and handle the
			 * individual error if needed.
			 */
			errh_handle_attr(&errh_flt);
			break;

		case ERRH_DESC_WARN_RE:
			/*
			 * Power-off requested, but handle it one time only.
			 */
			if (!err_shutdown_triggered) {
				setsoftint(err_shutdown_inum);
				++err_shutdown_triggered;
			}
			continue;

		default:
			cmn_err(CE_WARN, "Error Descriptor 0x%llx "
			    "invalid in resumable error handler",
			    (long long) errh_flt.errh_er.desc);
			continue;
		}

		aflt = (struct async_flt *)&(errh_flt.cmn_asyncflt);
		aflt->flt_id = gethrtime();
		aflt->flt_bus_id = getprocessorid();
		aflt->flt_class = CPU_FAULT;
		aflt->flt_prot = AFLT_PROT_NONE;
		aflt->flt_priv = (((errh_flt.errh_er.attr & ERRH_MODE_MASK)
		    >> ERRH_MODE_SHIFT) == ERRH_MODE_PRIV);

		if (errh_flt.errh_er.attr & ERRH_ATTR_CPU)
			/* If it is an error on another cpu */
			aflt->flt_panic = 1;
		else
			aflt->flt_panic = 0;

		/*
		 * Handle the resumable-queue-full case.
		 */
		if (errh_flt.errh_er.attr & ERRH_ATTR_RQF) {
			(void) errh_rq_full(aflt);
		}

		/*
		 * Queue the error on the ce or ue queue depending on
		 * flt_panic.  Even if flt_panic is set, we keep processing
		 * the remaining elements on the resumable queue until the
		 * panic starts.
		 */
		(void) cpu_queue_one_event(&errh_flt);

		/*
		 * Panic here if aflt->flt_panic has been set.
		 * Enqueued errors will be logged as part of the panic flow.
		 */
		if (aflt->flt_panic) {
			fm_panic("Unrecoverable error on another CPU");
		}
	}
}

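/*
 * Drain this CPU's nonresumable error queue.  For each pending report we
 * decide whether the kernel can recover: precise/deferred errors taken in
 * the kernel may be redirected to an on_trap() or t_lofault trampoline,
 * PIO errors are referred to the bus nexus drivers, and anything that is
 * neither protected nor attributable to user mode causes a panic.
 */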
void
process_nonresumable_error(struct regs *rp, uint64_t flags,
    uint32_t head_offset, uint32_t tail_offset)
{
	struct machcpu *mcpup;
	struct async_flt *aflt;
	errh_async_flt_t errh_flt;
	errh_er_t *head_va;
	int trampolined = 0;
	int expected = DDI_FM_ERR_UNEXPECTED;
	uint64_t exec_mode;
	uint8_t u_spill_fill;

	mcpup = &(CPU->cpu_m);

	while (head_offset != tail_offset) {
		/* kernel buffer starts right after the nonresumable queue */
		head_va = (errh_er_t *)(mcpup->cpu_nrq_va + head_offset +
		    CPU_NRQ_SIZE);

		/* Copy the error report to the local buffer */
		bzero(&errh_flt, sizeof (errh_async_flt_t));

		bcopy((char *)head_va, &(errh_flt.errh_er),
		    sizeof (errh_er_t));

		mcpup->cpu_nrq_lastnre = head_va;
		if (printerrh)
			errh_er_print(&errh_flt.errh_er, "NRQ");

		/* Increment the queue head */
		head_offset += Q_ENTRY_SIZE;
		/* Wrap around */
		head_offset &= (CPU_NRQ_SIZE - 1);

		/* Clear the error handle so the entry can hold a new report */
		head_va->ehdl = 0;

		aflt = (struct async_flt *)&(errh_flt.cmn_asyncflt);

		trampolined = 0;

		if (errh_flt.errh_er.attr & ERRH_ATTR_PIO)
			aflt->flt_class = BUS_FAULT;
		else
			aflt->flt_class = CPU_FAULT;

		aflt->flt_id = gethrtime();
		aflt->flt_bus_id = getprocessorid();
		aflt->flt_pc = (caddr_t)rp->r_pc;
		exec_mode = (errh_flt.errh_er.attr & ERRH_MODE_MASK)
		    >> ERRH_MODE_SHIFT;
		aflt->flt_priv = (exec_mode == ERRH_MODE_PRIV ||
		    exec_mode == ERRH_MODE_UNKNOWN);
		aflt->flt_prot = AFLT_PROT_NONE;
		aflt->flt_tl = (uchar_t)(flags & ERRH_TL_MASK);
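		/* Errors taken at TL > 0, or with aft_testfatal set, are fatal. */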
		aflt->flt_panic = ((aflt->flt_tl != 0) ||
		    (aft_testfatal != 0));

		/*
		 * For the first error packet on the queue, check if it
		 * happened in a user fill/spill trap.
		 */
		if (flags & ERRH_U_SPILL_FILL) {
			u_spill_fill = 1;
			/* clear the user fill/spill flag in flags */
			flags = (uint64_t)aflt->flt_tl;
		} else
			u_spill_fill = 0;

		switch (errh_flt.errh_er.desc) {
		case ERRH_DESC_PR_NRE:
			if (u_spill_fill) {
				aflt->flt_panic = 0;
				break;
			}
			/*
			 * Fall through: a precise fault also needs to be
			 * checked to see if it was protected.
			 */
			/*FALLTHRU*/

		case ERRH_DESC_DEF_NRE:
			/*
			 * If the trap occurred in privileged mode at TL=0,
			 * we need to check to see if we were executing
			 * in kernel under on_trap() or t_lofault
			 * protection. If so, and if it was a PIO or MEM
			 * error, then modify the saved registers so that
			 * we return from the trap to the appropriate
			 * trampoline routine.
			 */
			if (aflt->flt_priv == 1 && aflt->flt_tl == 0 &&
			    ((errh_flt.errh_er.attr & ERRH_ATTR_PIO) ||
			    (errh_flt.errh_er.attr & ERRH_ATTR_MEM))) {
				trampolined =
				    errh_error_protected(rp, aflt, &expected);
			}

			if (!aflt->flt_priv || aflt->flt_prot ==
			    AFLT_PROT_COPY) {
				aflt->flt_panic |= aft_panic;
			} else if (!trampolined &&
			    (aflt->flt_class != BUS_FAULT)) {
				aflt->flt_panic = 1;
			}

			/*
			 * Check the error attribute and handle the
			 * individual error if needed.
			 */
			errh_handle_attr(&errh_flt);

			/*
			 * If this is a PIO error, we need to query the bus
			 * nexus for fatal errors.
			 */
			if (aflt->flt_class == BUS_FAULT) {
				aflt->flt_addr = errh_flt.errh_er.ra;
				errh_cpu_run_bus_error_handlers(aflt,
				    expected);
			}

			break;

		case ERRH_DESC_USER_DCORE:
			/*
			 * User-generated panic. Call panic directly
			 * since there are no FMA e-reports to
			 * display.
			 */

			panic("Panic - Generated at user request");

			break;

		default:
			cmn_err(CE_WARN, "Panic - Error Descriptor 0x%llx "
			    "invalid in non-resumable error handler",
			    (long long) errh_flt.errh_er.desc);
			aflt->flt_panic = 1;
			break;
		}

		/*
		 * Queue the error report for further processing. If
		 * flt_panic is set, we still process the other errors
		 * in the queue until the panic routine stops the
		 * kernel.
		 */
		(void) cpu_queue_one_event(&errh_flt);

		/*
		 * Panic here if aflt->flt_panic has been set.
		 * Enqueued errors will be logged as part of the panic flow.
		 */
		if (aflt->flt_panic) {
			fm_panic("Unrecoverable hardware error");
		}

		/*
		 * Call page_retire() to handle memory errors.
		 */
		if (errh_flt.errh_er.attr & ERRH_ATTR_MEM)
			errh_page_retire(&errh_flt, PR_UE);

		/*
		 * If we queued an error and it was in user mode, or
		 * protected by t_lofault, or u_spill_fill is set, we
		 * set the AST flag so the queue will be drained before
		 * returning to user mode.
		 */
		if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY ||
		    u_spill_fill) {
			int pcb_flag = 0;

			if (aflt->flt_class == CPU_FAULT)
				pcb_flag |= ASYNC_HWERR;
			else if (aflt->flt_class == BUS_FAULT)
				pcb_flag |= ASYNC_BERR;

			ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag;
			aston(curthread);
		}
	}
}

/*
 * For PIO errors, this routine calls the nexus driver's error
 * callback routines. If the callback routine returns fatal, and
 * we are in kernel or unknown mode without any error protection,
 * we need to turn on the panic flag.
 */
void
errh_cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
{
	int status;
	ddi_fm_error_t de;

	bzero(&de, sizeof (ddi_fm_error_t));

	de.fme_version = DDI_FME_VERSION;
	de.fme_ena = fm_ena_generate(aflt->flt_id, FM_ENA_FMT1);
	de.fme_flag = expected;
	de.fme_bus_specific = (void *)aflt->flt_addr;
	status = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &de);

	/*
	 * If the error is protected, we will jump to the proper routine
	 * to handle it; if it occurred at user level, we just kill the
	 * user process; if the driver thinks the error is not fatal, we
	 * can drive on. If none of the above is true, we panic.
	 */
	if ((aflt->flt_prot == AFLT_PROT_NONE) && (aflt->flt_priv == 1) &&
	    (status == DDI_FM_FATAL))
		aflt->flt_panic = 1;
}

/*
 * This routine checks to see if we are under any error protection when
 * the error happens. If we are under error protection, we unwind to
 * the protection and indicate the fault.
 */
static int
errh_error_protected(struct regs *rp, struct async_flt *aflt, int *expected)
{
	int trampolined = 0;
	ddi_acc_hdl_t *hp;

	if (curthread->t_ontrap != NULL) {
		on_trap_data_t *otp = curthread->t_ontrap;

		if (otp->ot_prot & OT_DATA_EC) {
			aflt->flt_prot = AFLT_PROT_EC;
			otp->ot_trap |= OT_DATA_EC;
			rp->r_pc = otp->ot_trampoline;
			rp->r_npc = rp->r_pc + 4;
			trampolined = 1;
		}

		if (otp->ot_prot & OT_DATA_ACCESS) {
			aflt->flt_prot = AFLT_PROT_ACCESS;
			otp->ot_trap |= OT_DATA_ACCESS;
			rp->r_pc = otp->ot_trampoline;
			rp->r_npc = rp->r_pc + 4;
			trampolined = 1;
			/*
			 * For peeks and cautious gets, errors
			 * are expected.
			 */
			hp = (ddi_acc_hdl_t *)otp->ot_handle;
			if (!hp)
				*expected = DDI_FM_ERR_PEEK;
			else if (hp->ah_acc.devacc_attr_access ==
			    DDI_CAUTIOUS_ACC)
				*expected = DDI_FM_ERR_EXPECTED;
		}
	} else if (curthread->t_lofault) {
		aflt->flt_prot = AFLT_PROT_COPY;
		rp->r_g1 = EFAULT;
		rp->r_pc = curthread->t_lofault;
		rp->r_npc = rp->r_pc + 4;
		trampolined = 1;
	}

	return (trampolined);
}

/*
 * Queue one event.
 */
static void
cpu_queue_one_event(errh_async_flt_t *errh_fltp)
{
	struct async_flt *aflt = (struct async_flt *)errh_fltp;
	errorq_t *eqp;

	if (aflt->flt_panic)
		eqp = ue_queue;
	else
		eqp = ce_queue;

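	/*
	 * When flt_panic is set, dispatch synchronously so the event is
	 * not lost before the panic flow drains and logs the queue.
	 */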
	errorq_dispatch(eqp, errh_fltp, sizeof (errh_async_flt_t),
	    aflt->flt_panic);
}

/*
 * The cpu_async_log_err() function is called by the ce/ue_drain() function to
 * handle logging for CPU events that are dequeued.  As such, it can be invoked
 * from softint context, from AST processing in the trap() flow, or from the
 * panic flow.  We decode the CPU-specific data, and log appropriate messages.
 */
void
cpu_async_log_err(void *flt)
{
	errh_async_flt_t *errh_fltp = (errh_async_flt_t *)flt;
	errh_er_t *errh_erp = (errh_er_t *)&errh_fltp->errh_er;

	switch (errh_erp->desc) {
	case ERRH_DESC_UCOR_RE:
		if (errh_erp->attr & ERRH_ATTR_MEM) {
			/*
			 * Turn on the PR_UE flag. The page will be
			 * scrubbed when it is freed.
			 */
			errh_page_retire(errh_fltp, PR_UE);
		}

		break;

	case ERRH_DESC_PR_NRE:
	case ERRH_DESC_DEF_NRE:
		if (errh_erp->attr & ERRH_ATTR_MEM) {
			/*
			 * For a non-resumable memory error, retire
			 * the page here.
			 */
			errh_page_retire(errh_fltp, PR_UE);

			/*
			 * If we are going to panic, scrub the page first.
			 */
			if (errh_fltp->cmn_asyncflt.flt_panic)
				mem_scrub(errh_fltp->errh_er.ra,
				    errh_fltp->errh_er.sz);
		}
		break;

	default:
		break;
	}
}

/*
 * Called from ce_drain().
 */
void
cpu_ce_log_err(struct async_flt *aflt)
{
	switch (aflt->flt_class) {
	case CPU_FAULT:
		cpu_async_log_err(aflt);
		break;

	case BUS_FAULT:
		cpu_async_log_err(aflt);
		break;

	default:
		break;
	}
}

/*
 * Called from ue_drain().
 */
void
cpu_ue_log_err(struct async_flt *aflt)
{
	switch (aflt->flt_class) {
	case CPU_FAULT:
		cpu_async_log_err(aflt);
		break;

	case BUS_FAULT:
		cpu_async_log_err(aflt);
		break;

	default:
		break;
	}
}

/*
 * Turn on the given retire flag for each page in the error memory region.
 */
static void
errh_page_retire(errh_async_flt_t *errh_fltp, uchar_t flag)
{
	uint64_t flt_real_addr_start = errh_fltp->errh_er.ra;
	uint64_t flt_real_addr_end = flt_real_addr_start +
	    errh_fltp->errh_er.sz - 1;
	int64_t current_addr;

	if (errh_fltp->errh_er.sz == 0)
		return;

	for (current_addr = flt_real_addr_start;
	    current_addr < flt_real_addr_end; current_addr += MMU_PAGESIZE) {
		(void) page_retire(current_addr, flag);
	}
}

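/*
 * Scrub the given physical address range via the hv_mem_scrub() hypervisor
 * call.  A single call may scrub only part of the range, so loop until the
 * entire range has been scrubbed or the hypervisor reports an error.
 */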
void
mem_scrub(uint64_t paddr, uint64_t len)
{
	uint64_t pa, length, scrubbed_len;

	pa = paddr;
	length = len;
	scrubbed_len = 0;

	while (length > 0) {
		if (hv_mem_scrub(pa, length, &scrubbed_len) != H_EOK)
			break;

		pa += scrubbed_len;
		length -= scrubbed_len;
	}
}

/*
 * Call the hypervisor to flush the memory region.
 * The va and len arguments need not be MMU_PAGESIZE aligned; the region
 * is aligned down and rounded up to page boundaries here.
 * Returns the total number of bytes flushed.
 */
uint64_t
mem_sync(caddr_t orig_va, size_t orig_len)
{
	uint64_t pa, length, flushed;
	uint64_t chunk_len = MMU_PAGESIZE;
	uint64_t total_flushed = 0;
	uint64_t va, len;

	if (orig_len == 0)
		return (total_flushed);

	/* align va down to an MMU_PAGESIZE boundary */
	va = P2ALIGN_TYPED(orig_va, MMU_PAGESIZE, uint64_t);
	/* round len up so the end is MMU_PAGESIZE aligned */
	len = P2ROUNDUP_TYPED(orig_va + orig_len, MMU_PAGESIZE, uint64_t) - va;

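	/*
	 * Walk the region one page at a time: virtually contiguous pages
	 * are not necessarily physically contiguous, so each page is
	 * translated separately.  The hypervisor may also flush only part
	 * of a request, hence the inner loop below.
	 */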
	while (len > 0) {
		pa = va_to_pa((caddr_t)va);
		if (pa == (uint64_t)-1)
			return (total_flushed);

		length = chunk_len;
		flushed = 0;

		while (length > 0) {
			if (hv_mem_sync(pa, length, &flushed) != H_EOK)
				return (total_flushed);

			pa += flushed;
			length -= flushed;
			total_flushed += flushed;
		}

		va += chunk_len;
		len -= chunk_len;
	}

	return (total_flushed);
}

/*
 * If the resumable queue is full, we need to check whether any cpu is in
 * the error state. If not, we drive on; if so, we need to panic. The
 * hypervisor call hv_cpu_state() is used to check the cpu state.  We also
 * reset %tick_compr here in case the tick-compare value was lost.
 */
static void
errh_rq_full(struct async_flt *afltp)
{
	processorid_t who;
	uint64_t cpu_state;
	uint64_t retval;
	uint64_t current_tick;

	current_tick = (uint64_t)gettick();
	tickcmpr_set(current_tick);

	for (who = 0; who < NCPU; who++)
		if (CPU_IN_SET(cpu_ready_set, who)) {
			retval = hv_cpu_state(who, &cpu_state);
			if (retval != H_EOK || cpu_state == CPU_STATE_ERROR) {
				afltp->flt_panic = 1;
				break;
			}
		}
}

/*
 * Return the size of the processor-specific async error structure.
 */
int
cpu_aflt_size(void)
{
	return (sizeof (errh_async_flt_t));
}

#define	SZ_TO_ETRS_SHIFT	6

/*
 * Called when the resumable queue overflows; we simply count the
 * overflows here.
 */
/*ARGSUSED*/
void
rq_overflow(struct regs *rp, uint64_t head_offset,
    uint64_t tail_offset)
{
	rq_overflow_count++;
}

/*
 * Handler to process a fatal error.  This routine can be called from a
 * softint, called from trap()'s AST handling, or called from the panic flow.
 */
/*ARGSUSED*/
static void
ue_drain(void *ignored, struct async_flt *aflt, errorq_elem_t *eqep)
{
	cpu_ue_log_err(aflt);
}

/*
 * Handler to process a correctable error.  This routine can be called from a
 * softint.  We just call the CPU module's logging routine.
 */
/*ARGSUSED*/
static void
ce_drain(void *ignored, struct async_flt *aflt, errorq_elem_t *eqep)
{
	cpu_ce_log_err(aflt);
}

/*
 * Handler to process vbsc hostshutdown (power-off button).
 */
static int
err_shutdown_softintr()
{
	cmn_err(CE_WARN, "Power-off requested, system will now shutdown.");
	do_shutdown();

	/*
	 * just in case do_shutdown() fails
	 */
	(void) timeout((void(*)(void *))power_down, NULL, 100 * hz);
	return (DDI_INTR_CLAIMED);
}

/*
 * Allocate error queue sizes based on max_ncpus.  max_ncpus is set just
 * after ncpunode has been determined.  ncpus is set in start_other_cpus
 * which is called after error_init() but may change dynamically.
 */
void
error_init(void)
{
	char tmp_name[MAXSYSNAME];
	pnode_t node;
	size_t size = cpu_aflt_size();

	/*
	 * Initialize the correctable and uncorrectable error queues.
	 */
	ue_queue = errorq_create("ue_queue", (errorq_func_t)ue_drain, NULL,
	    MAX_ASYNC_FLTS * (max_ncpus + 1), size, PIL_2, ERRORQ_VITAL);

	ce_queue = errorq_create("ce_queue", (errorq_func_t)ce_drain, NULL,
	    MAX_CE_FLTS * (max_ncpus + 1), size, PIL_1, 0);

	if (ue_queue == NULL || ce_queue == NULL)
		panic("failed to create required system error queue");

	/*
	 * Set up the soft interrupt handler for the power-off button.
	 */
	err_shutdown_inum = add_softintr(PIL_9,
	    (softintrfunc)err_shutdown_softintr, NULL, SOFTINT_ST);

	/*
	 * Initialize the busfunc list mutex.  This must be a PIL_15 spin lock
	 * because we will need to acquire it from cpu_async_error().
	 */
	mutex_init(&bfd_lock, NULL, MUTEX_SPIN, (void *)PIL_15);

	/* Only allow one cpu at a time to dump errh errors. */
	mutex_init(&errh_print_lock, NULL, MUTEX_SPIN, (void *)PIL_15);

	node = prom_rootnode();
	if ((node == OBP_NONODE) || (node == OBP_BADNODE)) {
		cmn_err(CE_CONT, "error_init: node 0x%x\n", (uint_t)node);
		return;
	}

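	/* Report the reset reason recorded by the PROM, if one is present. */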
	if (((size = prom_getproplen(node, "reset-reason")) != -1) &&
	    (size <= MAXSYSNAME) &&
	    (prom_getprop(node, "reset-reason", tmp_name) != -1)) {
		if (reset_debug) {
			cmn_err(CE_CONT, "System booting after %s\n", tmp_name);
		} else if (strncmp(tmp_name, "FATAL", 5) == 0) {
			cmn_err(CE_CONT,
			    "System booting after fatal error %s\n", tmp_name);
		}
	}
}

/*
 * Nonresumable queue is full, panic here
 */
/*ARGSUSED*/
void
nrq_overflow(struct regs *rp)
{
	fm_panic("Nonresumable queue full");
}

/*
 * This is the place for special error handling for individual errors.
 */
static void
errh_handle_attr(errh_async_flt_t *errh_fltp)
{
	switch (errh_fltp->errh_er.attr & ~ERRH_MODE_MASK) {
	case ERRH_ATTR_CPU:
	case ERRH_ATTR_MEM:
	case ERRH_ATTR_PIO:
	case ERRH_ATTR_IRF:
	case ERRH_ATTR_FRF:
	case ERRH_ATTR_SHUT:
		break;

	case ERRH_ATTR_ASR:
		errh_handle_asr(errh_fltp);
		break;

	case ERRH_ATTR_ASI:
	case ERRH_ATTR_PREG:
	case ERRH_ATTR_RQF:
		break;

	default:
		break;
	}
}

/*
 * Handle the ASR bit being set in ATTR.
 */
static void
errh_handle_asr(errh_async_flt_t *errh_fltp)
{
	uint64_t current_tick;

	switch (errh_fltp->errh_er.reg) {
	case ASR_REG_VALID | ASR_REG_TICK:
		/*
		 * A Tick Compare Register error only happens when the
		 * register is being read or compared with the %tick
		 * register. Since we lost the contents of the register,
		 * we set %tick_compr to a value in the future. An
		 * interrupt will happen when %tick matches the value
		 * field of %tick_compr.
		 */
		current_tick = (uint64_t)gettick();
		tickcmpr_set(current_tick);
		/* Do not panic */
		errh_fltp->cmn_asyncflt.flt_panic = 0;
		break;

	default:
		break;
	}
}

/*
 * Dump the error packet
 */
/*ARGSUSED*/
static void
errh_er_print(errh_er_t *errh_erp, const char *queue)
{
	typedef union {
		uint64_t w;
		uint16_t s[4];
	} errhp_t;
	errhp_t *p = (errhp_t *)errh_erp;
	int i;

	mutex_enter(&errh_print_lock);
	switch (errh_erp->desc) {
	case ERRH_DESC_UCOR_RE:
		cmn_err(CE_CONT, "\nResumable Uncorrectable Error ");
		break;
	case ERRH_DESC_PR_NRE:
		cmn_err(CE_CONT, "\nNonresumable Precise Error ");
		break;
	case ERRH_DESC_DEF_NRE:
		cmn_err(CE_CONT, "\nNonresumable Deferred Error ");
		break;
	default:
		cmn_err(CE_CONT, "\nError packet ");
		break;
	}
	cmn_err(CE_CONT, "received on %s\n", queue);

	/*
	 * Print Q_ENTRY_SIZE bytes of epacket with 8 bytes per line
	 */
	for (i = Q_ENTRY_SIZE; i > 0; i -= 8, ++p) {
		cmn_err(CE_CONT, "%016lx: %04x %04x %04x %04x\n", (uint64_t)p,
		    p->s[0], p->s[1], p->s[2], p->s[3]);
	}
	mutex_exit(&errh_print_lock);
}