xref: /illumos-gate/usr/src/uts/sun4u/cpu/opl_olympus.c (revision 0ccf9e790d232720597416743840df88825a9317)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/systm.h>
30 #include <sys/ddi.h>
31 #include <sys/sysmacros.h>
32 #include <sys/archsystm.h>
33 #include <sys/vmsystm.h>
34 #include <sys/machparam.h>
35 #include <sys/machsystm.h>
36 #include <sys/machthread.h>
37 #include <sys/cpu.h>
38 #include <sys/cmp.h>
39 #include <sys/elf_SPARC.h>
40 #include <vm/vm_dep.h>
41 #include <vm/hat_sfmmu.h>
42 #include <vm/seg_kpm.h>
43 #include <sys/cpuvar.h>
44 #include <sys/opl_olympus_regs.h>
45 #include <sys/opl_module.h>
46 #include <sys/async.h>
47 #include <sys/cmn_err.h>
48 #include <sys/debug.h>
49 #include <sys/dditypes.h>
50 #include <sys/cpu_module.h>
51 #include <sys/sysmacros.h>
52 #include <sys/intreg.h>
53 #include <sys/clock.h>
54 #include <sys/platform_module.h>
55 #include <sys/ontrap.h>
56 #include <sys/panic.h>
57 #include <sys/memlist.h>
58 #include <sys/ndifm.h>
59 #include <sys/ddifm.h>
60 #include <sys/fm/protocol.h>
61 #include <sys/fm/util.h>
62 #include <sys/fm/cpu/SPARC64-VI.h>
63 #include <sys/dtrace.h>
64 #include <sys/watchpoint.h>
65 #include <sys/promif.h>
66 
67 /*
68  * Internal functions.
69  */
70 static int cpu_sync_log_err(void *flt);
71 static void cpu_payload_add_aflt(struct async_flt *, nvlist_t *, nvlist_t *);
72 static void opl_cpu_sync_error(struct regs *, ulong_t, ulong_t, uint_t, uint_t);
73 static int  cpu_flt_in_memory(opl_async_flt_t *, uint64_t);
74 
75 /*
76  * Interval at which the error counters are reset.
77  */
78 static int opl_async_check_interval = 60;		/* 1 min */
79 
80 uint_t cpu_impl_dual_pgsz = 1;
81 
82 /*
83  * PA[22:0] represents the displacement in the Jupiter
84  * configuration space.
85  */
86 uint_t	root_phys_addr_lo_mask = 0x7fffffu;
87 
88 /*
89  * set in /etc/system to control logging of user BERR/TO's
90  */
91 int cpu_berr_to_verbose = 0;
92 
93 static int min_ecache_size;
94 static uint_t priv_hcl_1;
95 static uint_t priv_hcl_2;
96 static uint_t priv_hcl_4;
97 static uint_t priv_hcl_8;
98 
99 /*
100  * Olympus error log
101  */
102 static opl_errlog_t	*opl_err_log;
103 
104 /*
105  * UE is classified into four classes (MEM, CHANNEL, CPU, PATH).
106  * No other ecc_type_info entry may be inserted between the following
107  * four UE classes.
108  */
109 ecc_type_to_info_t ecc_type_to_info[] = {
110 	SFSR_UE,	"UE ",	(OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
111 	"Uncorrectable ECC",  FM_EREPORT_PAYLOAD_SYNC,
112 	FM_EREPORT_CPU_UE_MEM,
113 	SFSR_UE,	"UE ",	(OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
114 	"Uncorrectable ECC",  FM_EREPORT_PAYLOAD_SYNC,
115 	FM_EREPORT_CPU_UE_CHANNEL,
116 	SFSR_UE,	"UE ",	(OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
117 	"Uncorrectable ECC",  FM_EREPORT_PAYLOAD_SYNC,
118 	FM_EREPORT_CPU_UE_CPU,
119 	SFSR_UE,	"UE ",	(OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
120 	"Uncorrectable ECC",  FM_EREPORT_PAYLOAD_SYNC,
121 	FM_EREPORT_CPU_UE_PATH,
122 	SFSR_BERR, "BERR ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
123 	"Bus Error",  FM_EREPORT_PAYLOAD_SYNC,
124 	FM_EREPORT_CPU_BERR,
125 	SFSR_TO, "TO ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
126 	"Bus Timeout",  FM_EREPORT_PAYLOAD_SYNC,
127 	FM_EREPORT_CPU_BTO,
128 	SFSR_TLB_MUL, "TLB_MUL ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
129 	"TLB MultiHit",  FM_EREPORT_PAYLOAD_SYNC,
130 	FM_EREPORT_CPU_MTLB,
131 	SFSR_TLB_PRT, "TLB_PRT ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
132 	"TLB Parity",  FM_EREPORT_PAYLOAD_SYNC,
133 	FM_EREPORT_CPU_TLBP,
134 
135 	UGESR_IAUG_CRE, "IAUG_CRE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
136 	"IAUG CRE",  FM_EREPORT_PAYLOAD_URGENT,
137 	FM_EREPORT_CPU_CRE,
138 	UGESR_IAUG_TSBCTXT, "IAUG_TSBCTXT",
139 	OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
140 	"IAUG TSBCTXT",  FM_EREPORT_PAYLOAD_URGENT,
141 	FM_EREPORT_CPU_TSBCTX,
142 	UGESR_IUG_TSBP, "IUG_TSBP", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
143 	"IUG TSBP",  FM_EREPORT_PAYLOAD_URGENT,
144 	FM_EREPORT_CPU_TSBP,
145 	UGESR_IUG_PSTATE, "IUG_PSTATE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
146 	"IUG PSTATE",  FM_EREPORT_PAYLOAD_URGENT,
147 	FM_EREPORT_CPU_PSTATE,
148 	UGESR_IUG_TSTATE, "IUG_TSTATE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
149 	"IUG TSTATE",  FM_EREPORT_PAYLOAD_URGENT,
150 	FM_EREPORT_CPU_TSTATE,
151 	UGESR_IUG_F, "IUG_F", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
152 	"IUG FREG",  FM_EREPORT_PAYLOAD_URGENT,
153 	FM_EREPORT_CPU_IUG_F,
154 	UGESR_IUG_R, "IUG_R", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
155 	"IUG RREG",  FM_EREPORT_PAYLOAD_URGENT,
156 	FM_EREPORT_CPU_IUG_R,
157 	UGESR_AUG_SDC, "AUG_SDC", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
158 	"AUG SDC",  FM_EREPORT_PAYLOAD_URGENT,
159 	FM_EREPORT_CPU_SDC,
160 	UGESR_IUG_WDT, "IUG_WDT", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
161 	"IUG WDT",  FM_EREPORT_PAYLOAD_URGENT,
162 	FM_EREPORT_CPU_WDT,
163 	UGESR_IUG_DTLB, "IUG_DTLB", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
164 	"IUG DTLB",  FM_EREPORT_PAYLOAD_URGENT,
165 	FM_EREPORT_CPU_DTLB,
166 	UGESR_IUG_ITLB, "IUG_ITLB", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
167 	"IUG ITLB",  FM_EREPORT_PAYLOAD_URGENT,
168 	FM_EREPORT_CPU_ITLB,
169 	UGESR_IUG_COREERR, "IUG_COREERR",
170 	OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
171 	"IUG COREERR",  FM_EREPORT_PAYLOAD_URGENT,
172 	FM_EREPORT_CPU_CORE,
173 	UGESR_MULTI_DAE, "MULTI_DAE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
174 	"MULTI DAE",  FM_EREPORT_PAYLOAD_URGENT,
175 	FM_EREPORT_CPU_DAE,
176 	UGESR_MULTI_IAE, "MULTI_IAE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
177 	"MULTI IAE",  FM_EREPORT_PAYLOAD_URGENT,
178 	FM_EREPORT_CPU_IAE,
179 	UGESR_MULTI_UGE, "MULTI_UGE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
180 	"MULTI UGE",  FM_EREPORT_PAYLOAD_URGENT,
181 	FM_EREPORT_CPU_UGE,
182 	0,		NULL,		0,		0,
183 	NULL,  0,	   0,
184 };
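
/*
 * Illustrative sketch (not part of the build): cpu_queue_events() depends
 * on the four UE entries above being contiguous and in EID order.  It
 * advances its table pointer by flt_eid_mod so that a single SFSR_UE bit
 * resolves to the class-specific ereport:
 *
 *	ecc_type_to_info_t *eccp = &ecc_type_to_info[0];  (UE_MEM entry)
 *	eccp += opl_flt->flt_eid_mod;	  (now MEM/CHANNEL/CPU/PATH entry)
 *
 * Inserting any other entry between the UE rows would break this offset
 * arithmetic; hence the warning in the block comment above.
 */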
185 
186 int (*p2get_mem_info)(int synd_code, uint64_t paddr,
187 		uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
188 		int *segsp, int *banksp, int *mcidp);
189 
190 
191 /*
192  * Setup trap handlers for 0xA, 0x32, 0x40 trap types.
193  */
194 void
195 cpu_init_trap(void)
196 {
197 	OPL_SET_TRAP(tt0_iae, opl_serr_instr);
198 	OPL_SET_TRAP(tt1_iae, opl_serr_instr);
199 	OPL_SET_TRAP(tt0_dae, opl_serr_instr);
200 	OPL_SET_TRAP(tt1_dae, opl_serr_instr);
201 	OPL_SET_TRAP(tt0_asdat, opl_ugerr_instr);
202 	OPL_SET_TRAP(tt1_asdat, opl_ugerr_instr);
203 }
204 
205 static int
206 getintprop(pnode_t node, char *name, int deflt)
207 {
208 	int	value;
209 
210 	switch (prom_getproplen(node, name)) {
211 	case sizeof (int):
212 		(void) prom_getprop(node, name, (caddr_t)&value);
213 		break;
214 
215 	default:
216 		value = deflt;
217 		break;
218 	}
219 
220 	return (value);
221 }
222 
223 /*
224  * Set the magic constants of the implementation.
225  */
226 /*ARGSUSED*/
227 void
228 cpu_fiximp(pnode_t dnode)
229 {
230 	int i, a;
231 	extern int vac_size, vac_shift;
232 	extern uint_t vac_mask;
233 
234 	static struct {
235 		char	*name;
236 		int	*var;
237 		int	defval;
238 	} prop[] = {
239 		"l1-dcache-size", &dcache_size, OPL_DCACHE_SIZE,
240 		"l1-dcache-line-size", &dcache_linesize, OPL_DCACHE_LSIZE,
241 		"l1-icache-size", &icache_size, OPL_ICACHE_SIZE,
242 		"l1-icache-line-size", &icache_linesize, OPL_ICACHE_LSIZE,
243 		"l2-cache-size", &ecache_size, OPL_ECACHE_SIZE,
244 		"l2-cache-line-size", &ecache_alignsize, OPL_ECACHE_LSIZE,
245 		"l2-cache-associativity", &ecache_associativity, OPL_ECACHE_NWAY
246 	};
247 
248 	for (i = 0; i < sizeof (prop) / sizeof (prop[0]); i++)
249 		*prop[i].var = getintprop(dnode, prop[i].name, prop[i].defval);
250 
251 	ecache_setsize = ecache_size / ecache_associativity;
252 
253 	vac_size = OPL_VAC_SIZE;
254 	vac_mask = MMU_PAGEMASK & (vac_size - 1);
255 	i = 0; a = vac_size;
256 	while (a >>= 1)
257 		++i;
258 	vac_shift = i;
259 	shm_alignment = vac_size;
260 	vac = 1;
261 }
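
/*
 * A minimal sketch of the vac_shift computation above, assuming a
 * hypothetical vac_size of 0x20000 (128KB):
 *
 *	int i = 0, a = 0x20000;
 *	while (a >>= 1)
 *		++i;		(i ends at 17, i.e. vac_shift == log2(vac_size))
 *
 * vac_mask then keeps the page-aligned address bits below vac_size, which
 * the VM layer uses for virtual-color computations.
 */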
262 
263 #ifdef	OLYMPUS_C_REV_B_ERRATA_XCALL
264 /*
265  * Quick and dirty way to locally redefine in
266  * OPL the value of IDSR_BN_SETS to 31 instead
267  * of the standard 32. This works around a problem
268  * in REV_B of the Olympus-C processor when handling
269  * more than 31 xcall broadcasts.
270  */
271 #undef	IDSR_BN_SETS
272 #define	IDSR_BN_SETS    31
273 #endif	/* OLYMPUS_C_REV_B_ERRATA_XCALL */
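
/*
 * Sketch of the IDSR handshake that send_mondo_set() below relies on
 * (assuming the usual sun4u layout, where each of the IDSR_BN_SETS
 * dispatch slots has a BUSY and a NACK bit):
 *
 *	shipit(cpu, slot);			(dispatch one mondo)
 *	nackmask |= IDSR_NACK_BIT(slot);
 *	busymask = IDSR_NACK_TO_BUSY(nackmask);
 *	...
 *	idsr = getidsr();
 *	if (idsr & busymask)	(target still busy: keep polling)
 *	if (idsr & nackmask)	(target nacked: re-ship that slot)
 *
 * With only 31 usable slots on REV_B parts, the 32nd and later targets
 * must wait for a slot to free up, which the loop below arranges.
 */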
274 
275 void
276 send_mondo_set(cpuset_t set)
277 {
278 	int lo, busy, nack, shipped = 0;
279 	uint16_t i, cpuids[IDSR_BN_SETS];
280 	uint64_t idsr, nackmask = 0, busymask, curnack, curbusy;
281 	uint64_t starttick, endtick, tick, lasttick;
282 #if (NCPU > IDSR_BN_SETS)
283 	int index = 0;
284 	int ncpuids = 0;
285 #endif
286 #ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
287 	int bn_sets = IDSR_BN_SETS;
288 	uint64_t ver;
289 
290 	ASSERT(NCPU > bn_sets);
291 #endif
292 
293 	ASSERT(!CPUSET_ISNULL(set));
294 	starttick = lasttick = gettick();
295 
296 #ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
297 	ver = ultra_getver();
298 	if (((ULTRA_VER_IMPL(ver)) == OLYMPUS_C_IMPL) &&
299 		((OLYMPUS_REV_MASK(ver)) == OLYMPUS_C_A))
300 		bn_sets = 1;
301 #endif
302 
303 #if (NCPU <= IDSR_BN_SETS)
304 	for (i = 0; i < NCPU; i++)
305 		if (CPU_IN_SET(set, i)) {
306 			shipit(i, shipped);
307 			nackmask |= IDSR_NACK_BIT(shipped);
308 			cpuids[shipped++] = i;
309 			CPUSET_DEL(set, i);
310 			if (CPUSET_ISNULL(set))
311 				break;
312 		}
313 	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);
314 #else
315 	for (i = 0; i < NCPU; i++)
316 		if (CPU_IN_SET(set, i)) {
317 			ncpuids++;
318 
319 			/*
320 			 * Ship only to the first (IDSR_BN_SETS) CPUs.  If we
321 			 * find we have shipped to more than (IDSR_BN_SETS)
322 			 * CPUs, set "index" to the highest numbered CPU in
323 			 * the set so we can ship to other CPUs a bit later on.
324 			 */
325 #ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
326 			if (shipped < bn_sets) {
327 #else
328 			if (shipped < IDSR_BN_SETS) {
329 #endif
330 				shipit(i, shipped);
331 				nackmask |= IDSR_NACK_BIT(shipped);
332 				cpuids[shipped++] = i;
333 				CPUSET_DEL(set, i);
334 				if (CPUSET_ISNULL(set))
335 					break;
336 			} else
337 				index = (int)i;
338 		}
339 
340 	CPU_STATS_ADDQ(CPU, sys, xcalls, ncpuids);
341 #endif
342 
343 	busymask = IDSR_NACK_TO_BUSY(nackmask);
344 	busy = nack = 0;
345 	endtick = starttick + xc_tick_limit;
346 	for (;;) {
347 		idsr = getidsr();
348 #if (NCPU <= IDSR_BN_SETS)
349 		if (idsr == 0)
350 			break;
351 #else
352 		if (idsr == 0 && shipped == ncpuids)
353 			break;
354 #endif
355 		tick = gettick();
356 		/*
357 		 * If there is a big jump between the current tick
358 		 * count and lasttick, we have probably hit a break
359 		 * point.  Adjust endtick accordingly to avoid panic.
360 		 */
361 		if (tick > (lasttick + xc_tick_jump_limit))
362 			endtick += (tick - lasttick);
363 		lasttick = tick;
364 		if (tick > endtick) {
365 			if (panic_quiesce)
366 				return;
367 			cmn_err(CE_CONT, "send mondo timeout "
368 				"[%d NACK %d BUSY]\nIDSR 0x%"
369 				"" PRIx64 "  cpuids:", nack, busy, idsr);
370 #ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
371 			for (i = 0; i < bn_sets; i++) {
372 #else
373 			for (i = 0; i < IDSR_BN_SETS; i++) {
374 #endif
375 				if (idsr & (IDSR_NACK_BIT(i) |
376 				    IDSR_BUSY_BIT(i))) {
377 					cmn_err(CE_CONT, " 0x%x",
378 						cpuids[i]);
379 				}
380 			}
381 			cmn_err(CE_CONT, "\n");
382 			cmn_err(CE_PANIC, "send_mondo_set: timeout");
383 		}
384 		curnack = idsr & nackmask;
385 		curbusy = idsr & busymask;
386 
387 #ifdef OLYMPUS_C_REV_B_ERRATA_XCALL
388 		/*
389 		 * Only proceed to send more xcalls if all the
390 		 * cpus in the previous IDSR_BN_SETS were completed.
391 		 */
392 		if (curbusy) {
393 			busy++;
394 			continue;
395 		}
396 #endif /* OLYMPUS_C_REV_B_ERRATA_XCALL */
397 
398 #if (NCPU > IDSR_BN_SETS)
399 		if (shipped < ncpuids) {
400 			uint64_t cpus_left;
401 			uint16_t next = (uint16_t)index;
402 
403 			cpus_left = ~(IDSR_NACK_TO_BUSY(curnack) | curbusy) &
404 			    busymask;
405 
406 			if (cpus_left) {
407 				do {
408 					/*
409 					 * Sequence through and ship to the
410 					 * remainder of the CPUs in the system
411 					 * (i.e. other than the first
412 					 * (IDSR_BN_SETS)) in reverse order.
413 					 */
414 					lo = lowbit(cpus_left) - 1;
415 					i = IDSR_BUSY_IDX(lo);
416 					shipit(next, i);
417 					shipped++;
418 					cpuids[i] = next;
419 
420 					/*
421 					 * If we've processed all the CPUs,
422 					 * exit the loop now and save
423 					 * instructions.
424 					 */
425 					if (shipped == ncpuids)
426 						break;
427 
428 					for ((index = ((int)next - 1));
429 						index >= 0; index--)
430 						if (CPU_IN_SET(set, index)) {
431 							next = (uint16_t)index;
432 							break;
433 						}
434 
435 					cpus_left &= ~(1ull << lo);
436 				} while (cpus_left);
437 				continue;
438 			}
439 		}
440 #endif
441 #ifndef	OLYMPUS_C_REV_B_ERRATA_XCALL
442 		if (curbusy) {
443 			busy++;
444 			continue;
445 		}
446 #endif	/* OLYMPUS_C_REV_B_ERRATA_XCALL */
447 #ifdef SEND_MONDO_STATS
448 		{
449 			int n = gettick() - starttick;
450 			if (n < 8192)
451 				x_nack_stimes[n >> 7]++;
452 		}
453 #endif
454 		while (gettick() < (tick + sys_clock_mhz))
455 			;
456 		do {
457 			lo = lowbit(curnack) - 1;
458 			i = IDSR_NACK_IDX(lo);
459 			shipit(cpuids[i], i);
460 			curnack &= ~(1ull << lo);
461 		} while (curnack);
462 		nack++;
463 		busy = 0;
464 	}
465 #ifdef SEND_MONDO_STATS
466 	{
467 		int n = gettick() - starttick;
468 		if (n < 8192)
469 			x_set_stimes[n >> 7]++;
470 		else
471 			x_set_ltimes[(n >> 13) & 0xf]++;
472 	}
473 	x_set_cpus[shipped]++;
474 #endif
475 }
476 
477 /*
478  * Cpu private initialization.
479  */
480 void
481 cpu_init_private(struct cpu *cp)
482 {
483 	if (!(IS_OLYMPUS_C(cpunodes[cp->cpu_id].implementation))) {
484 		cmn_err(CE_PANIC, "CPU%d Impl %d: Only SPARC64-VI is supported",
485 			cp->cpu_id, cpunodes[cp->cpu_id].implementation);
486 	}
487 
488 	adjust_hw_copy_limits(cpunodes[cp->cpu_id].ecache_size);
489 }
490 
491 void
492 cpu_setup(void)
493 {
494 	extern int at_flags;
495 	extern int disable_delay_tlb_flush, delay_tlb_flush;
496 	extern int cpc_has_overflow_intr;
497 	extern int disable_text_largepages;
498 	extern int use_text_pgsz4m;
499 	uint64_t cpu0_log;
500 	extern	 uint64_t opl_cpu0_err_log;
501 
502 	/*
503 	 * Initialize Error log Scratch register for error handling.
504 	 */
505 
506 	cpu0_log = va_to_pa(&opl_cpu0_err_log);
507 	opl_error_setup(cpu0_log);
508 
509 	/*
510 	 * Enable MMU translating multiple page sizes for
511 	 * sITLB and sDTLB.
512 	 */
513 	opl_mpg_enable();
514 
515 	/*
516 	 * Setup chip-specific trap handlers.
517 	 */
518 	cpu_init_trap();
519 
520 	cache |= (CACHE_VAC | CACHE_PTAG | CACHE_IOCOHERENT);
521 
522 	at_flags = EF_SPARC_32PLUS | EF_SPARC_SUN_US1 | EF_SPARC_SUN_US3;
523 
524 	/*
525 	 * Due to the number of entries in the fully-associative TLB,
526 	 * this may have to be tuned lower than on Spitfire.
527 	 */
528 	pp_slots = MIN(8, MAXPP_SLOTS);
529 
530 	/*
531 	 * Block stores do not invalidate all pages of the d$; pagecopy
532 	 * et al. need virtual translations with virtual coloring taken
533 	 * into consideration.  prefetch/ldd will pollute the d$ on the
534 	 * load side.
535 	 */
536 	pp_consistent_coloring = PPAGE_STORE_VCOLORING | PPAGE_LOADS_POLLUTE;
537 
538 	if (use_page_coloring) {
539 		do_pg_coloring = 1;
540 		if (use_virtual_coloring)
541 			do_virtual_coloring = 1;
542 	}
543 
544 	isa_list =
545 	    "sparcv9+vis2 sparcv9+vis sparcv9 "
546 	    "sparcv8plus+vis2 sparcv8plus+vis sparcv8plus "
547 	    "sparcv8 sparcv8-fsmuld sparcv7 sparc";
548 
549 	cpu_hwcap_flags = AV_SPARC_VIS | AV_SPARC_VIS2;
550 
551 	/*
552 	 * On SPARC64-VI, there's no hole in the virtual address space
553 	 */
554 	hole_start = hole_end = 0;
555 
556 	/*
557 	 * The kpm mapping window.
558 	 * kpm_size:
559 	 *	The size of a single kpm range.
560 	 *	The overall size will be: kpm_size * vac_colors.
561 	 * kpm_vbase:
562 	 *	The virtual start address of the kpm range within the kernel
563 	 *	virtual address space. kpm_vbase has to be kpm_size aligned.
564 	 */
565 	kpm_size = (size_t)(128ull * 1024 * 1024 * 1024 * 1024); /* 128TB */
566 	kpm_size_shift = 47;
567 	kpm_vbase = (caddr_t)0x8000000000000000ull; /* 8EB */
568 	kpm_smallpages = 1;
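
	/*
	 * Worked example of the layout above (a sketch only): with
	 * kpm_size_shift == 47, kpm_size == 1ULL << 47 == 128TB, and a
	 * page with frame number pfn maps, in the single small-page
	 * range, at approximately:
	 *
	 *	caddr_t vaddr = kpm_vbase + ptob(pfn);
	 *
	 * The authoritative translation is done by the kpm support in
	 * the sfmmu HAT; this only illustrates the address arithmetic.
	 */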
569 
570 	/*
571 	 * The traptrace code uses either %tick or %stick for
572 	 * timestamping.  We have %stick so we can use it.
573 	 */
574 	traptrace_use_stick = 1;
575 
576 	/*
577 	 * SPARC64-VI has a performance counter overflow interrupt
578 	 */
579 	cpc_has_overflow_intr = 1;
580 
581 	/*
582 	 * Use SPARC64-VI flush-all support
583 	 */
584 	if (!disable_delay_tlb_flush)
585 		delay_tlb_flush = 1;
586 
587 	/*
588 	 * Declare that this architecture/cpu combination does not support
589 	 * fpRAS.
590 	 */
591 	fpras_implemented = 0;
592 
593 	/*
594 	 * Enable 4M pages to be used for mapping user text by default.  Don't
595 	 * use large pages for initialized data segments since we may not know
596 	 * at exec() time what should be the preferred large page size for DTLB
597 	 * programming.
598 	 */
599 	use_text_pgsz4m = 1;
600 	disable_text_largepages = (1 << TTE64K) | (1 << TTE512K) |
601 	    (1 << TTE32M) | (1 << TTE256M);
602 }
603 
604 /*
605  * Called by setcpudelay
606  */
607 void
608 cpu_init_tick_freq(void)
609 {
610 	/*
611 	 * For SPARC64-VI we want to use the system clock rate as
612 	 * the basis for low level timing, due to support of mixed
613 	 * speed CPUs and power management.
614 	 */
615 	if (system_clock_freq == 0)
616 		cmn_err(CE_PANIC, "setcpudelay: invalid system_clock_freq");
617 
618 	sys_tick_freq = system_clock_freq;
619 }
620 
621 #ifdef SEND_MONDO_STATS
622 uint32_t x_one_stimes[64];
623 uint32_t x_one_ltimes[16];
624 uint32_t x_set_stimes[64];
625 uint32_t x_set_ltimes[16];
626 uint32_t x_set_cpus[NCPU];
627 uint32_t x_nack_stimes[64];
628 #endif
629 
630 /*
631  * Note: A version of this function is used by the debugger via the KDI,
632  * and must be kept in sync with this version.  Any changes made to this
633  * function to support new chips or to accommodate errata must also be included
634  * in the KDI-specific version.  See us3_kdi.c.
635  */
636 void
637 send_one_mondo(int cpuid)
638 {
639 	int busy, nack;
640 	uint64_t idsr, starttick, endtick, tick, lasttick;
641 	uint64_t busymask;
642 
643 	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
644 	starttick = lasttick = gettick();
645 	shipit(cpuid, 0);
646 	endtick = starttick + xc_tick_limit;
647 	busy = nack = 0;
648 	busymask = IDSR_BUSY;
649 	for (;;) {
650 		idsr = getidsr();
651 		if (idsr == 0)
652 			break;
653 
654 		tick = gettick();
655 		/*
656 		 * If there is a big jump between the current tick
657 		 * count and lasttick, we have probably hit a break
658 		 * point.  Adjust endtick accordingly to avoid panic.
659 		 */
660 		if (tick > (lasttick + xc_tick_jump_limit))
661 			endtick += (tick - lasttick);
662 		lasttick = tick;
663 		if (tick > endtick) {
664 			if (panic_quiesce)
665 				return;
666 			cmn_err(CE_PANIC, "send mondo timeout "
667 				"(target 0x%x) [%d NACK %d BUSY]",
668 					cpuid, nack, busy);
669 		}
670 
671 		if (idsr & busymask) {
672 			busy++;
673 			continue;
674 		}
675 		drv_usecwait(1);
676 		shipit(cpuid, 0);
677 		nack++;
678 		busy = 0;
679 	}
680 #ifdef SEND_MONDO_STATS
681 	{
682 		int n = gettick() - starttick;
683 		if (n < 8192)
684 			x_one_stimes[n >> 7]++;
685 		else
686 			x_one_ltimes[(n >> 13) & 0xf]++;
687 	}
688 #endif
689 }
690 
691 /*
692  * init_mmu_page_sizes is set to one after the bootup time initialization
693  * via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a
694  * valid value.
695  *
696  * mmu_disable_ism_large_pages and mmu_disable_large_pages are the mmu-specific
697  * versions of disable_ism_large_pages and disable_large_pages, and feed back
698  * into those two hat variables at hat initialization time.
699  *
700  */
701 int init_mmu_page_sizes = 0;
702 static int mmu_disable_ism_large_pages = ((1 << TTE64K) |
703 	(1 << TTE512K) | (1 << TTE256M));
704 static int mmu_disable_large_pages = 0;
705 
706 /*
707  * Re-initialize mmu_page_sizes and friends, for SPARC64-VI mmu support.
708  * Called during very early bootup from check_cpus_set().
709  * Can be called to verify that mmu_page_sizes are set up correctly.
710  *
711  * Set Olympus defaults. We do not use the function parameter.
712  */
713 /*ARGSUSED*/
714 int
715 mmu_init_mmu_page_sizes(int32_t not_used)
716 {
717 	if (!init_mmu_page_sizes) {
718 		mmu_page_sizes = MMU_PAGE_SIZES;
719 		mmu_hashcnt = MAX_HASHCNT;
720 		mmu_ism_pagesize = MMU_PAGESIZE32M;
721 		mmu_exported_pagesize_mask = (1 << TTE8K) |
722 		    (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) |
723 		    (1 << TTE32M) | (1 << TTE256M);
724 		init_mmu_page_sizes = 1;
725 		return (0);
726 	}
727 	return (1);
728 }
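
/*
 * For reference (a sketch of the arithmetic only): with the usual sfmmu
 * TTE size codes, TTE8K == 0 through TTE256M == 5, the exported mask set
 * above works out to:
 *
 *	(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5)
 *	    == 0x3f
 *
 * i.e. all six page sizes are advertised.  The authoritative values come
 * from the TTE* definitions in the sfmmu headers.
 */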
729 
730 /* SPARC64-VI worst case DTLB parameters */
731 #ifndef	LOCKED_DTLB_ENTRIES
732 #define	LOCKED_DTLB_ENTRIES	5	/* 2 user TSBs, 2 nucleus, + OBP */
733 #endif
734 #define	TOTAL_DTLB_ENTRIES	32
735 #define	AVAIL_32M_ENTRIES	0
736 #define	AVAIL_256M_ENTRIES	0
737 #define	AVAIL_DTLB_ENTRIES	(TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES)
738 static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = {
739 	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
740 	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
741 	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES};
742 
743 /*
744  * The function returns the mmu-specific values for the
745  * hat's disable_large_pages and disable_ism_large_pages variables.
746  */
747 int
748 mmu_large_pages_disabled(uint_t flag)
749 {
750 	int pages_disable = 0;
751 
752 	if (flag == HAT_LOAD) {
753 		pages_disable =  mmu_disable_large_pages;
754 	} else if (flag == HAT_LOAD_SHARE) {
755 		pages_disable = mmu_disable_ism_large_pages;
756 	}
757 	return (pages_disable);
758 }
759 
760 /*
761  * mmu_init_large_pages is called with the desired ism_pagesize parameter.
762  * It may be called from set_platform_defaults, if some value other than 32M
763  * is desired.  mmu_ism_pagesize is the tunable.  If it has a bad value,
764  * then only warn, since it would be bad form to panic due to a user typo.
765  *
766  * The function re-initializes the mmu_disable_ism_large_pages variable.
767  */
768 void
769 mmu_init_large_pages(size_t ism_pagesize)
770 {
771 	switch (ism_pagesize) {
772 	case MMU_PAGESIZE4M:
773 		mmu_disable_ism_large_pages = ((1 << TTE64K) |
774 		    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
775 		break;
776 	case MMU_PAGESIZE32M:
777 		mmu_disable_ism_large_pages = ((1 << TTE64K) |
778 		    (1 << TTE512K) | (1 << TTE256M));
779 		break;
780 	case MMU_PAGESIZE256M:
781 		mmu_disable_ism_large_pages = ((1 << TTE64K) |
782 		    (1 << TTE512K) | (1 << TTE32M));
783 		break;
784 	default:
785 		cmn_err(CE_WARN, "Unrecognized mmu_ism_pagesize value 0x%lx",
786 		    ism_pagesize);
787 		break;
788 	}
789 }
790 
791 /*ARGSUSED*/
792 uint_t
793 mmu_preferred_pgsz(struct hat *hat, caddr_t addr, size_t len)
794 {
795 	sfmmu_t *sfmmup = (sfmmu_t *)hat;
796 	uint_t pgsz0, pgsz1;
797 	uint_t szc, maxszc = mmu_page_sizes - 1;
798 	size_t pgsz;
799 	extern int disable_large_pages;
800 
801 	pgsz0 = (uint_t)sfmmup->sfmmu_pgsz[0];
802 	pgsz1 = (uint_t)sfmmup->sfmmu_pgsz[1];
803 
804 	/*
805 	 * If either of the TLBs is reprogrammed, choose
806 	 * the largest mapping size as the preferred size,
807 	 * if it fits the size and alignment constraints.
808 	 * Else return the largest mapping size that fits,
809 	 * if neither TLB is reprogrammed.
810 	 */
811 	if (pgsz0 > TTE8K || pgsz1 > TTE8K) {
812 		if (pgsz1 > pgsz0) {	/* First try pgsz1 */
813 			pgsz = hw_page_array[pgsz1].hp_size;
814 			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
815 				return (pgsz1);
816 		}
817 		if (pgsz0 > TTE8K) {	/* Then try pgsz0, if !TTE8K */
818 			pgsz = hw_page_array[pgsz0].hp_size;
819 			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
820 				return (pgsz0);
821 		}
822 	} else { /* Otherwise pick best fit if neither TLB is reprogrammed. */
823 		for (szc = maxszc; szc > TTE8K; szc--) {
824 			if (disable_large_pages & (1 << szc))
825 				continue;
826 
827 			pgsz = hw_page_array[szc].hp_size;
828 			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
829 				return (szc);
830 		}
831 	}
832 	return (TTE8K);
833 }
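
/*
 * Worked example for the routine above (hypothetical inputs): suppose
 *
 *	sfmmup->sfmmu_pgsz[0] == TTE4M,		(pgsz0)
 *	sfmmup->sfmmu_pgsz[1] == TTE8K,		(pgsz1)
 *	addr is 4MB-aligned and len == 8MB.
 *
 * Then pgsz1 <= pgsz0, so the pgsz0 branch runs:
 *
 *	pgsz = hw_page_array[TTE4M].hp_size;		(4MB)
 *	(len >= pgsz) && IS_P2ALIGNED(addr, pgsz)	(true, return TTE4M)
 *
 * With neither TLB reprogrammed, the else branch instead returns the
 * largest enabled size that fits, falling back to TTE8K.
 */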
834 
835 /*
836  * Function to reprogram the TLBs when page sizes used
837  * by a process change significantly.
838  */
839 void
840 mmu_setup_page_sizes(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
841 {
842 	uint8_t pgsz0, pgsz1;
843 
844 	/*
845 	 * Don't program 2nd dtlb for kernel and ism hat
846 	 */
847 	ASSERT(hat->sfmmu_ismhat == NULL);
848 	ASSERT(hat != ksfmmup);
849 
850 	/*
851 	 * hat->sfmmu_pgsz[] is an array whose elements
852 	 * contain a sorted order of page sizes.  Element
853 	 * 0 is the most commonly used page size, followed
854 	 * by element 1, and so on.
855 	 *
856 	 * ttecnt[] is an array of per-page-size page counts
857 	 * mapped into the process.
858 	 *
859 	 * If the HAT's choice for page sizes is unsuitable,
860 	 * we can override it here.  The new values written
861 	 * to the array will be handed back to us later to
862 	 * do the actual programming of the TLB hardware.
863 	 *
864 	 */
865 	pgsz0 = (uint8_t)MIN(tmp_pgsz[0], tmp_pgsz[1]);
866 	pgsz1 = (uint8_t)MAX(tmp_pgsz[0], tmp_pgsz[1]);
867 
868 	/*
869 	 * This implements PAGESIZE programming of the sTLB
870 	 * if large TTE counts don't exceed the thresholds.
871 	 */
872 	if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
873 		pgsz0 = page_szc(MMU_PAGESIZE);
874 	if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
875 		pgsz1 = page_szc(MMU_PAGESIZE);
876 	tmp_pgsz[0] = pgsz0;
877 	tmp_pgsz[1] = pgsz1;
878 	/* otherwise, accept what the HAT chose for us */
879 }
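
/*
 * Example of the demotion above (a sketch, using the worst-case DTLB
 * numbers defined earlier, where every threshold is
 * AVAIL_DTLB_ENTRIES == 27):
 *
 *	ttecnt[TTE4M] == 5, pgsz0 == TTE4M
 *	    -> pgsz0 = page_szc(MMU_PAGESIZE) == TTE8K
 *
 * A large page size is kept in the programmable TLB only if the process
 * maps at least 27 pages of that size; otherwise PAGESIZE is programmed.
 */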
880 
881 /*
882  * The HAT calls this function when an MMU context is allocated so that we
883  * can reprogram the large TLBs appropriately for the new process using
884  * the context.
885  *
886  * The caller must hold the HAT lock.
887  */
888 void
889 mmu_set_ctx_page_sizes(struct hat *hat)
890 {
891 	uint8_t pgsz0, pgsz1;
892 	uint8_t new_cext;
893 
894 	ASSERT(sfmmu_hat_lock_held(hat));
895 	/*
896 	 * Don't program 2nd dtlb for kernel and ism hat
897 	 */
898 	if (hat->sfmmu_ismhat || hat == ksfmmup)
899 		return;
900 
901 	/*
902 	 * If supported, reprogram the TLBs to a larger pagesize.
903 	 */
904 	pgsz0 = hat->sfmmu_pgsz[0];
905 	pgsz1 = hat->sfmmu_pgsz[1];
906 	ASSERT(pgsz0 < mmu_page_sizes);
907 	ASSERT(pgsz1 < mmu_page_sizes);
908 	new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
909 	if (hat->sfmmu_cext != new_cext) {
910 #ifdef DEBUG
911 		int i;
912 		/*
913 		 * The cnum must be invalid, because page sizes can only be
914 		 * changed after a process's contexts are invalidated.
915 		 */
916 		for (i = 0; i < max_mmu_ctxdoms; i++) {
917 			ASSERT(hat->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
918 		}
919 #endif /* DEBUG */
920 		hat->sfmmu_cext = new_cext;
921 	}
922 	/*
923 	 * sfmmu_setctx_sec() will take care of the
924 	 * rest of the dirty work for us.
925 	 */
926 }
927 
928 /*
929  * This function assumes that there are either four or six supported page
930  * sizes and at most two programmable TLBs, so we need to decide which
931  * page sizes are most important and then adjust the TLB page sizes
932  * accordingly (if supported).
933  *
934  * If these assumptions change, this function will need to be
935  * updated to support whatever the new limits are.
936  */
937 void
938 mmu_check_page_sizes(sfmmu_t *sfmmup, uint64_t *ttecnt)
939 {
940 	uint64_t sortcnt[MMU_PAGE_SIZES];
941 	uint8_t tmp_pgsz[MMU_PAGE_SIZES];
942 	uint8_t i, j, max;
943 	uint16_t oldval, newval;
944 
945 	/*
946 	 * We only consider reprogramming the TLBs if one or more of
947 	 * the two most used page sizes changes and we're using
948 	 * large pages in this process.
949 	 */
950 	if (sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) {
951 		/* Sort page sizes. */
952 		for (i = 0; i < mmu_page_sizes; i++) {
953 			sortcnt[i] = ttecnt[i];
954 		}
955 		for (j = 0; j < mmu_page_sizes; j++) {
956 			for (i = mmu_page_sizes - 1, max = 0; i > 0; i--) {
957 				if (sortcnt[i] > sortcnt[max])
958 					max = i;
959 			}
960 			tmp_pgsz[j] = max;
961 			sortcnt[max] = 0;
962 		}
963 
964 		oldval = sfmmup->sfmmu_pgsz[0] << 8 | sfmmup->sfmmu_pgsz[1];
965 
966 		mmu_setup_page_sizes(sfmmup, ttecnt, tmp_pgsz);
967 
968 		/* Check 2 largest values after the sort. */
969 		newval = tmp_pgsz[0] << 8 | tmp_pgsz[1];
970 		if (newval != oldval) {
971 			sfmmu_reprog_pgsz_arr(sfmmup, tmp_pgsz);
972 		}
973 	}
974 }
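
/*
 * Sketch of the selection sort above with hypothetical counts (array
 * indexes are TTE size codes):
 *
 *	ttecnt[]   = { 100, 0, 7, 42, 0, 0 }
 *	tmp_pgsz[] -> { 0, 3, 2, 0, 0, 0 }
 *
 * Zero counts all collapse to index 0 because the comparison is strict.
 * oldval/newval pack the top two choices as (first << 8 | second), so
 * sfmmu_reprog_pgsz_arr() runs only when one of the two most-used page
 * sizes actually changes.
 */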
975 
976 /*
977  * Return processor specific async error structure
978  * size used.
979  */
980 int
981 cpu_aflt_size(void)
982 {
983 	return (sizeof (opl_async_flt_t));
984 }
985 
986 /*
987  * The cpu_sync_log_err() function is called via the [uc]e_drain() function to
988  * post-process CPU events that are dequeued.  As such, it can be invoked
989  * from softint context, from AST processing in the trap() flow, or from the
990  * panic flow.  We decode the CPU-specific data, and take appropriate actions.
991  * Historically this entry point was used to log the actual cmn_err(9F) text;
992  * now with FMA it is used to prepare 'flt' to be converted into an ereport.
993  * With FMA this function now also returns a flag which indicates to the
994  * caller whether the ereport should be posted (1) or suppressed (0).
995  */
996 /*ARGSUSED*/
997 static int
998 cpu_sync_log_err(void *flt)
999 {
1000 	opl_async_flt_t *opl_flt = (opl_async_flt_t *)flt;
1001 	struct async_flt *aflt = (struct async_flt *)flt;
1002 
1003 	/*
1004 	 * No extra processing of urgent error events.
1005 	 * Always generate ereports for these events.
1006 	 */
1007 	if (aflt->flt_status == OPL_ECC_URGENT_TRAP)
1008 		return (1);
1009 
1010 	/*
1011 	 * Additional processing for synchronous errors.
1012 	 */
1013 	switch (opl_flt->flt_type) {
1014 	case OPL_CPU_INV_SFSR:
1015 		return (1);
1016 
1017 	case OPL_CPU_SYNC_UE:
1018 		/*
1019 		 * The validity (SFSR_MK_UE) bit has already been checked
1020 		 * in opl_cpu_sync_error();
1021 		 * no further check is required.
1022 		 *
1023 		 * opl_flt->flt_eid_mod and flt_eid_sid have been set by H/W,
1024 		 * and they have been retrieved in cpu_queue_events()
1025 		 */
1026 
1027 		if (opl_flt->flt_eid_mod == OPL_ERRID_MEM) {
1028 			ASSERT(aflt->flt_in_memory);
1029 			/*
1030 			 * We want to skip logging only if ALL the following
1031 			 * conditions are true:
1032 			 *
1033 			 *	1. We are not panicking already.
1034 			 *	2. The error is a memory error.
1035 			 *	3. There is only one error.
1036 			 *	4. The error is on a retired page.
1037 			 *	5. The error occurred under on_trap
1038 			 *	protection AFLT_PROT_EC
1039 			 */
1040 			if (!panicstr && aflt->flt_prot == AFLT_PROT_EC &&
1041 			    page_retire_check(aflt->flt_addr, NULL) == 0) {
1042 				/*
1043 				 * Do not log an error from
1044 				 * the retired page
1045 				 */
1046 				softcall(ecc_page_zero, (void *)aflt->flt_addr);
1047 				return (0);
1048 			}
1049 			if (!panicstr)
1050 				cpu_page_retire(opl_flt);
1051 		}
1052 		return (1);
1053 
1054 	case OPL_CPU_SYNC_OTHERS:
1055 		/*
1056 		 * For the following error cases, the processor HW does
1057 		 * not set the flt_eid_mod/flt_eid_sid. Instead, SW will attempt
1058 		 * to assign appropriate values here to reflect what we
1059 		 * think is the most likely cause of the problem w.r.t.
1060 		 * the particular error event.  For bus error and timeout
1061 		 * error events, we will assign OPL_ERRID_CHANNEL as the
1062 		 * most likely reason.  For TLB parity or multiple hit
1063 		 * error events, we will assign the reason as
1064 		 * OPL_ERRID_CPU (cpu related problem) and set the
1065 		 * flt_eid_sid to point to the cpuid.
1066 		 */
1067 
1068 		if (opl_flt->flt_bit & (SFSR_BERR|SFSR_TO)) {
1069 			/*
1070 			 * flt_eid_sid will not be used for this case.
1071 			 */
1072 			opl_flt->flt_eid_mod = OPL_ERRID_CHANNEL;
1073 		}
1074 		if (opl_flt->flt_bit & (SFSR_TLB_MUL|SFSR_TLB_PRT)) {
1075 			opl_flt->flt_eid_mod = OPL_ERRID_CPU;
1076 			opl_flt->flt_eid_sid = aflt->flt_inst;
1077 		}
1078 
1079 		/*
1080 		 * In case of no effective error bit
1081 		 */
1082 		if ((opl_flt->flt_bit & SFSR_ERRS) == 0) {
1083 			opl_flt->flt_eid_mod = OPL_ERRID_CPU;
1084 			opl_flt->flt_eid_sid = aflt->flt_inst;
1085 		}
1086 		break;
1087 
1088 	default:
1089 		return (1);
1090 	}
1091 	return (1);
1092 }
1093 
1094 /*
1095  * Retire the bad page that may contain the flushed error.
1096  */
1097 void
1098 cpu_page_retire(opl_async_flt_t *opl_flt)
1099 {
1100 	struct async_flt *aflt = (struct async_flt *)opl_flt;
1101 	(void) page_retire(aflt->flt_addr, PR_UE);
1102 }
1103 
1104 /*
1105  * Invoked by error_init() early in startup and therefore before
1106  * startup_errorq() is called to drain any error Q -
1107  *
1108  * startup()
1109  *   startup_end()
1110  *     error_init()
1111  *       cpu_error_init()
1112  * errorq_init()
1113  *   errorq_drain()
1114  * start_other_cpus()
1115  *
1116  * The purpose of this routine is to create error-related taskqs.  Taskqs
1117  * are used for this purpose because cpu_lock can't be grabbed from interrupt
1118  * context.
1119  *
1120  */
1121 /*ARGSUSED*/
1122 void
1123 cpu_error_init(int items)
1124 {
1125 	opl_err_log = (opl_errlog_t *)
1126 	    kmem_alloc(ERRLOG_ALLOC_SZ, KM_SLEEP);
1127 	if ((uint64_t)opl_err_log & MMU_PAGEOFFSET)
1128 		cmn_err(CE_PANIC, "The base address of the error log "
1129 		    "is not page aligned");
1130 }
1131 
1132 /*
1133  * We route all errors through a single switch statement.
1134  */
1135 void
1136 cpu_ue_log_err(struct async_flt *aflt)
1137 {
1138 	switch (aflt->flt_class) {
1139 	case CPU_FAULT:
1140 		if (cpu_sync_log_err(aflt))
1141 			cpu_ereport_post(aflt);
1142 		break;
1143 
1144 	case BUS_FAULT:
1145 		bus_async_log_err(aflt);
1146 		break;
1147 
1148 	default:
1149 		cmn_err(CE_WARN, "discarding async error %p with invalid "
1150 		    "fault class (0x%x)", (void *)aflt, aflt->flt_class);
1151 		return;
1152 	}
1153 }
1154 
1155 /*
1156  * Routine for panic hook callback from panic_idle().
1157  *
1158  * Nothing to do here.
1159  */
1160 void
1161 cpu_async_panic_callb(void)
1162 {
1163 }
1164 
1165 /*
1166  * Routine to return a string identifying the physical name
1167  * associated with a memory/cache error.
1168  */
1169 /*ARGSUSED*/
1170 int
1171 cpu_get_mem_unum(int synd_status, ushort_t flt_synd, uint64_t flt_stat,
1172     uint64_t flt_addr, int flt_bus_id, int flt_in_memory,
1173     ushort_t flt_status, char *buf, int buflen, int *lenp)
1174 {
1175 	int synd_code;
1176 	int ret;
1177 
1178 	/*
1179 	 * An AFSR of -1 defaults to a memory syndrome.
1180 	 */
1181 	synd_code = (int)flt_synd;
1182 
1183 	if (&plat_get_mem_unum) {
1184 		if ((ret = plat_get_mem_unum(synd_code, flt_addr, flt_bus_id,
1185 			flt_in_memory, flt_status, buf, buflen, lenp)) != 0) {
1186 			buf[0] = '\0';
1187 			*lenp = 0;
1188 		}
1189 		return (ret);
1190 	}
1191 	buf[0] = '\0';
1192 	*lenp = 0;
1193 	return (ENOTSUP);
1194 }
1195 
1196 /*
1197  * Wrapper for cpu_get_mem_unum() routine that takes an
1198  * async_flt struct rather than explicit arguments.
1199  */
1200 int
1201 cpu_get_mem_unum_aflt(int synd_status, struct async_flt *aflt,
1202     char *buf, int buflen, int *lenp)
1203 {
1204 	/*
1205 	 * We always pass -1 so that cpu_get_mem_unum will interpret this as a
1206 	 * memory error.
1207 	 */
1208 	return (cpu_get_mem_unum(synd_status, aflt->flt_synd,
1209 	    (uint64_t)-1,
1210 	    aflt->flt_addr, aflt->flt_bus_id, aflt->flt_in_memory,
1211 	    aflt->flt_status, buf, buflen, lenp));
1212 }
1213 
1214 /*
1215  * This routine is a more generic interface to cpu_get_mem_unum()
1216  * that may be used by other modules (e.g. mm).
1217  */
1218 /*ARGSUSED*/
1219 int
1220 cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
1221     char *buf, int buflen, int *lenp)
1222 {
1223 	int synd_status, flt_in_memory, ret;
1224 	ushort_t flt_status = 0;
1225 	char unum[UNUM_NAMLEN];
1226 
1227 	/*
1228 	 * Check for an invalid address.
1229 	 */
1230 	if (afar == (uint64_t)-1)
1231 		return (ENXIO);
1232 
1233 	if (synd == (uint64_t)-1)
1234 		synd_status = AFLT_STAT_INVALID;
1235 	else
1236 		synd_status = AFLT_STAT_VALID;
1237 
1238 	flt_in_memory = (*afsr & SFSR_MEMORY) &&
1239 		pf_is_memory(afar >> MMU_PAGESHIFT);
1240 
1241 	ret = cpu_get_mem_unum(synd_status, (ushort_t)synd, *afsr, afar,
1242 		CPU->cpu_id, flt_in_memory, flt_status, unum,
1243 		UNUM_NAMLEN, lenp);
1244 	if (ret != 0)
1245 		return (ret);
1246 
1247 	if (*lenp >= buflen)
1248 		return (ENAMETOOLONG);
1249 
1250 	(void) strncpy(buf, unum, buflen);
1251 
1252 	return (0);
1253 }
1254 
1255 /*
1256  * Routine to return memory information associated
1257  * with a physical address and syndrome.
1258  */
1259 /*ARGSUSED*/
1260 int
1261 cpu_get_mem_info(uint64_t synd, uint64_t afar,
1262     uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
1263     int *segsp, int *banksp, int *mcidp)
1264 {
1265 	int synd_code = (int)synd;
1266 
1267 	if (afar == (uint64_t)-1)
1268 		return (ENXIO);
1269 
1270 	if (p2get_mem_info != NULL)
1271 		return ((p2get_mem_info)(synd_code, afar,
1272 			mem_sizep, seg_sizep, bank_sizep,
1273 			segsp, banksp, mcidp));
1274 	else
1275 		return (ENOTSUP);
1276 }
1277 
1278 /*
1279  * Routine to return a string identifying the physical
1280  * name associated with a cpuid.
1281  */
1282 int
1283 cpu_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1284 {
1285 	int ret;
1286 	char unum[UNUM_NAMLEN];
1287 
1288 	if (&plat_get_cpu_unum) {
1289 		if ((ret = plat_get_cpu_unum(cpuid, unum, UNUM_NAMLEN, lenp))
1290 			!= 0)
1291 			return (ret);
1292 	} else {
1293 		return (ENOTSUP);
1294 	}
1295 
1296 	if (*lenp >= buflen)
1297 		return (ENAMETOOLONG);
1298 
1299 	(void) strncpy(buf, unum, *lenp);
1300 
1301 	return (0);
1302 }
1303 
1304 /*
1305  * This routine exports the name buffer size.
1306  */
1307 size_t
1308 cpu_get_name_bufsize()
1309 {
1310 	return (UNUM_NAMLEN);
1311 }
1312 
1313 /*
1314  * Flush the entire ecache by ASI_L2_CNTL.U2_FLUSH
1315  */
1316 void
1317 cpu_flush_ecache(void)
1318 {
1319 	flush_ecache(ecache_flushaddr, cpunodes[CPU->cpu_id].ecache_size,
1320 	    cpunodes[CPU->cpu_id].ecache_linesize);
1321 }
1322 
1323 static uint8_t
1324 flt_to_trap_type(struct async_flt *aflt)
1325 {
1326 	if (aflt->flt_status & OPL_ECC_ISYNC_TRAP)
1327 		return (TRAP_TYPE_ECC_I);
1328 	if (aflt->flt_status & OPL_ECC_DSYNC_TRAP)
1329 		return (TRAP_TYPE_ECC_D);
1330 	if (aflt->flt_status & OPL_ECC_URGENT_TRAP)
1331 		return (TRAP_TYPE_URGENT);
1332 	return (-1);
1333 }
1334 
1335 /*
1336  * Encode the data saved in the opl_async_flt_t struct into
1337  * the FM ereport payload.
1338  */
1339 /* ARGSUSED */
1340 static void
1341 cpu_payload_add_aflt(struct async_flt *aflt, nvlist_t *payload,
1342 		nvlist_t *resource)
1343 {
1344 	opl_async_flt_t *opl_flt = (opl_async_flt_t *)aflt;
1345 	char unum[UNUM_NAMLEN];
1346 	char sbuf[21]; /* sizeof (UINT64_MAX) + '\0' */
1347 	int len;
1348 
1349 
1350 	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SFSR) {
1351 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SFSR,
1352 			DATA_TYPE_UINT64, aflt->flt_stat, NULL);
1353 	}
1354 	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SFAR) {
1355 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SFAR,
1356 			DATA_TYPE_UINT64, aflt->flt_addr, NULL);
1357 	}
1358 	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_UGESR) {
1359 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UGESR,
1360 			DATA_TYPE_UINT64, aflt->flt_stat, NULL);
1361 	}
1362 	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PC) {
1363 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PC,
1364 		    DATA_TYPE_UINT64, (uint64_t)aflt->flt_pc, NULL);
1365 	}
1366 	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TL) {
1367 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TL,
1368 		    DATA_TYPE_UINT8, (uint8_t)aflt->flt_tl, NULL);
1369 	}
1370 	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TT) {
1371 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TT,
1372 		    DATA_TYPE_UINT8, flt_to_trap_type(aflt), NULL);
1373 	}
1374 	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PRIV) {
1375 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PRIV,
1376 		    DATA_TYPE_BOOLEAN_VALUE,
1377 		    (aflt->flt_priv ? B_TRUE : B_FALSE), NULL);
1378 	}
1379 	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_FLT_STATUS) {
1380 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FLT_STATUS,
1381 			DATA_TYPE_UINT64, (uint64_t)aflt->flt_status, NULL);
1382 	}
1383 
1384 	switch (opl_flt->flt_eid_mod) {
1385 	case OPL_ERRID_CPU:
1386 		(void) snprintf(sbuf, sizeof (sbuf), "%llX",
1387 			(u_longlong_t)cpunodes[opl_flt->flt_eid_sid].device_id);
1388 		(void) fm_fmri_cpu_set(resource, FM_CPU_SCHEME_VERSION,
1389 			NULL, opl_flt->flt_eid_sid,
1390 			(uint8_t *)&cpunodes[opl_flt->flt_eid_sid].version,
1391 			sbuf);
1392 		fm_payload_set(payload,
1393 			FM_EREPORT_PAYLOAD_NAME_RESOURCE,
1394 			DATA_TYPE_NVLIST, resource, NULL);
1395 		break;
1396 
1397 	case OPL_ERRID_CHANNEL:
1398 		/*
1399 		 * No resource is created but the cpumem DE will find
1400 		 * the defective path by retrieving EID from SFSR which is
1401 		 * included in the payload.
1402 		 */
1403 		break;
1404 
1405 	case OPL_ERRID_MEM:
1406 		(void) cpu_get_mem_unum_aflt(0, aflt, unum, UNUM_NAMLEN, &len);
1407 		(void) fm_fmri_mem_set(resource, FM_MEM_SCHEME_VERSION,
1408 			NULL, unum, NULL, (uint64_t)-1);
1409 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
1410 			DATA_TYPE_NVLIST, resource, NULL);
1411 		break;
1412 
1413 	case OPL_ERRID_PATH:
1414 		/*
1415 		 * No resource is created but the cpumem DE will find
1416 		 * the defective path by retrieving EID from SFSR which is
1417 		 * included in the payload.
1418 		 */
1419 		break;
1420 	}
1421 }
1422 
1423 /*
1424  * Returns whether fault address is valid for this error bit and
1425  * whether the address is "in memory" (i.e. pf_is_memory returns 1).
1426  */
1427 /*ARGSUSED*/
1428 static int
1429 cpu_flt_in_memory(opl_async_flt_t *opl_flt, uint64_t t_afsr_bit)
1430 {
1431 	struct async_flt *aflt = (struct async_flt *)opl_flt;
1432 
1433 	if (aflt->flt_status & (OPL_ECC_SYNC_TRAP)) {
1434 		return ((t_afsr_bit & SFSR_MEMORY) &&
1435 		    pf_is_memory(aflt->flt_addr >> MMU_PAGESHIFT));
1436 	}
1437 	return (0);
1438 }
1439 
1440 /*
1441  * In OPL, the SCF does the stick synchronization.
1442  */
1443 void
1444 sticksync_slave(void)
1445 {
1446 }
1447 
1448 /*
1449  * In OPL, the SCF does the stick synchronization.
1450  */
1451 void
1452 sticksync_master(void)
1453 {
1454 }
1455 
1456 /*
1457  * Cpu private uninitialization.  OPL cpus do not use the private area.
1458  */
1459 void
1460 cpu_uninit_private(struct cpu *cp)
1461 {
1462 	cmp_delete_cpu(cp->cpu_id);
1463 }
1464 
1465 /*
1466  * Always flush an entire cache.
1467  */
1468 void
1469 cpu_error_ecache_flush(void)
1470 {
1471 	cpu_flush_ecache();
1472 }
1473 
1474 void
1475 cpu_ereport_post(struct async_flt *aflt)
1476 {
1477 	char *cpu_type, buf[FM_MAX_CLASS];
1478 	nv_alloc_t *nva = NULL;
1479 	nvlist_t *ereport, *detector, *resource;
1480 	errorq_elem_t *eqep;
1481 	char sbuf[21]; /* sizeof (UINT64_MAX) + '\0' */
1482 
1483 	if (aflt->flt_panic || panicstr) {
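	/*
	 * Allocation note (a sketch of the two paths below): at panic
	 * time kmem allocation is unsafe, so the nvlists must come from
	 * a preallocated errorq element and are committed synchronously;
	 * otherwise they are kmem-backed fm_nvlist_create(NULL) lists
	 * and must be destroyed after posting.
	 */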
1484 		eqep = errorq_reserve(ereport_errorq);
1485 		if (eqep == NULL)
1486 			return;
1487 		ereport = errorq_elem_nvl(ereport_errorq, eqep);
1488 		nva = errorq_elem_nva(ereport_errorq, eqep);
1489 	} else {
1490 		ereport = fm_nvlist_create(nva);
1491 	}
1492 
1493 	/*
1494 	 * Create the scheme "cpu" FMRI.
1495 	 */
1496 	detector = fm_nvlist_create(nva);
1497 	resource = fm_nvlist_create(nva);
1498 	switch (cpunodes[aflt->flt_inst].implementation) {
1499 	case OLYMPUS_C_IMPL:
1500 		cpu_type = FM_EREPORT_CPU_SPARC64_VI;
1501 		break;
1502 	default:
1503 		cpu_type = FM_EREPORT_CPU_UNSUPPORTED;
1504 		break;
1505 	}
1506 	(void) snprintf(sbuf, sizeof (sbuf), "%llX",
1507 	    (u_longlong_t)cpunodes[aflt->flt_inst].device_id);
1508 	(void) fm_fmri_cpu_set(detector, FM_CPU_SCHEME_VERSION, NULL,
1509 	    aflt->flt_inst, (uint8_t *)&cpunodes[aflt->flt_inst].version,
1510 	    sbuf);
1511 
1512 	/*
1513 	 * Encode all the common data into the ereport.
1514 	 */
1515 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s",
1516 	    FM_ERROR_CPU, cpu_type, aflt->flt_erpt_class);
1517 
1518 	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
1519 	    fm_ena_generate(aflt->flt_id, FM_ENA_FMT1), detector, NULL);
1520 
1521 	/*
1522 	 * Encode the error specific data that was saved in
1523 	 * the async_flt structure into the ereport.
1524 	 */
1525 	cpu_payload_add_aflt(aflt, ereport, resource);
1526 
1527 	if (aflt->flt_panic || panicstr) {
1528 		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
1529 	} else {
1530 		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
1531 		fm_nvlist_destroy(ereport, FM_NVA_FREE);
1532 		fm_nvlist_destroy(detector, FM_NVA_FREE);
1533 		fm_nvlist_destroy(resource, FM_NVA_FREE);
1534 	}
1535 }
1536 
1537 void
1538 cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
1539 {
1540 	int status;
1541 	ddi_fm_error_t de;
1542 
1543 	bzero(&de, sizeof (ddi_fm_error_t));
1544 
1545 	de.fme_version = DDI_FME_VERSION;
1546 	de.fme_ena = fm_ena_generate(aflt->flt_id, FM_ENA_FMT1);
1547 	de.fme_flag = expected;
1548 	de.fme_bus_specific = (void *)aflt->flt_addr;
1549 	status = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &de);
1550 	if ((aflt->flt_prot == AFLT_PROT_NONE) && (status == DDI_FM_FATAL))
1551 		aflt->flt_panic = 1;
1552 }
1553 
1554 void
1555 cpu_errorq_dispatch(char *error_class, void *payload, size_t payload_sz,
1556     errorq_t *eqp, uint_t flag)
1557 {
1558 	struct async_flt *aflt = (struct async_flt *)payload;
1559 
1560 	aflt->flt_erpt_class = error_class;
1561 	errorq_dispatch(eqp, payload, payload_sz, flag);
1562 }
1563 
1564 void
1565 adjust_hw_copy_limits(int ecache_size)
1566 {
1567 	/*
1568 	 * Set hw copy limits.
1569 	 *
1570 	 * /etc/system will be parsed later and can override one or more
1571 	 * of these settings.
1572 	 *
1573 	 * At this time, ecache size seems only mildly relevant.
1574 	 * We seem to run into issues with the d-cache and stalls
1575 	 * we see on misses.
1576 	 *
1577 	 * Cycle measurement indicates that 2 byte aligned copies fare
1578 	 * little better than doing things with VIS at around 512 bytes.
1579 	 * 4 byte aligned shows promise until around 1024 bytes. 8 Byte
1580 	 * aligned is faster whenever the source and destination data
1581 	 * are in cache and the total size is less than 2 Kbytes.  The 2K
1582 	 * limit seems to be driven by the 2K write cache.
1583 	 * When more than 2K of copies are done in non-VIS mode, stores
1584 	 * back up in the write cache.  In VIS mode, the write cache is
1585 	 * bypassed, allowing faster cache-line writes aligned on cache
1586 	 * boundaries.
1587 	 *
1588 	 * In addition, in non-VIS mode, there is no prefetching, so
1589 	 * for larger copies, the advantage of prefetching to avoid even
1590 	 * occasional cache misses is enough to justify using the VIS code.
1591 	 *
1592 	 * During testing, it was discovered that netbench ran 3% slower
1593 	 * when hw_copy_limit_8 was 2K or larger.  Apparently for server
1594 	 * applications, data is only used once (copied to the output
1595 	 * buffer, then copied by the network device off the system).  Using
1596 	 * the VIS copy saves more L2 cache state.  Network copies are
1597 	 * around 1.3K to 1.5K in size for historical reasons.
1598 	 *
1599 	 * Therefore, a limit of 1K bytes will be used for the 8 byte
1600 	 * aligned copy even for large caches and 8 MB ecache.  The
1601 	 * infrastructure to allow different limits for different sized
1602 	 * caches is kept to allow further tuning in later releases.
1603 	 */
1604 
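	/*
	 * Net effect of the defaults below, as a sketch
	 * (VIS_COPY_THRESHOLD is a platform constant from the sun4u
	 * headers):
	 *
	 *	hw_copy_limit_1 = 1 * VIS_COPY_THRESHOLD	(1-byte aligned)
	 *	hw_copy_limit_2 = 2 * VIS_COPY_THRESHOLD	(2-byte aligned)
	 *	hw_copy_limit_4 = 4 * VIS_COPY_THRESHOLD	(4-byte aligned)
	 *	hw_copy_limit_8 = 4 * VIS_COPY_THRESHOLD	(8-byte aligned)
	 *
	 * Copies longer than the limit for their alignment use the VIS
	 * block-copy routines; shorter ones use integer loads and stores.
	 */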
1605 	if (min_ecache_size == 0 && use_hw_bcopy) {
1606 		/*
1607 		 * First time through - should be before /etc/system
1608 		 * is read.
1609 		 * Could skip the checks for zero but this lets us
1610 		 * preserve any debugger rewrites.
1611 		 */
1612 		if (hw_copy_limit_1 == 0) {
1613 			hw_copy_limit_1 = VIS_COPY_THRESHOLD;
1614 			priv_hcl_1 = hw_copy_limit_1;
1615 		}
1616 		if (hw_copy_limit_2 == 0) {
1617 			hw_copy_limit_2 = 2 * VIS_COPY_THRESHOLD;
1618 			priv_hcl_2 = hw_copy_limit_2;
1619 		}
1620 		if (hw_copy_limit_4 == 0) {
1621 			hw_copy_limit_4 = 4 * VIS_COPY_THRESHOLD;
1622 			priv_hcl_4 = hw_copy_limit_4;
1623 		}
1624 		if (hw_copy_limit_8 == 0) {
1625 			hw_copy_limit_8 = 4 * VIS_COPY_THRESHOLD;
1626 			priv_hcl_8 = hw_copy_limit_8;
1627 		}
1628 		min_ecache_size = ecache_size;
1629 	} else {
1630 		/*
1631 		 * MP initialization. Called *after* /etc/system has
1632 		 * been parsed. One CPU has already been initialized.
1633 		 * Need to cater for /etc/system having scragged one
1634 		 * of our values.
1635 		 */
1636 		if (ecache_size == min_ecache_size) {
1637 			/*
1638 			 * Same size ecache. We do nothing unless we
1639 			 * have a pessimistic ecache setting. In that
1640 			 * case we become more optimistic (if the cache is
1641 			 * large enough).
1642 			 */
1643 			if (hw_copy_limit_8 == 4 * VIS_COPY_THRESHOLD) {
1644 				/*
1645 				 * Need to adjust hw_copy_limit* from our
1646 				 * pessimistic uniprocessor value to a more
1647 				 * optimistic UP value *iff* it hasn't been
1648 				 * reset.
1649 				 */
1650 				if ((ecache_size > 1048576) &&
1651 				    (priv_hcl_8 == hw_copy_limit_8)) {
1652 					if (ecache_size <= 2097152)
1653 						hw_copy_limit_8 = 4 *
1654 						    VIS_COPY_THRESHOLD;
1655 					else if (ecache_size <= 4194304)
1656 						hw_copy_limit_8 = 4 *
1657 						    VIS_COPY_THRESHOLD;
1658 					else
1659 						hw_copy_limit_8 = 4 *
1660 						    VIS_COPY_THRESHOLD;
1661 					priv_hcl_8 = hw_copy_limit_8;
1662 				}
1663 			}
1664 		} else if (ecache_size < min_ecache_size) {
1665 			/*
1666 			 * A different ecache size. Can this even happen?
1667 			 */
1668 			if (priv_hcl_8 == hw_copy_limit_8) {
1669 				/*
1670 				 * The previous value that we set
1671 				 * is unchanged (i.e., it hasn't been
1672 				 * scragged by /etc/system). Rewrite it.
1673 				 */
1674 				if (ecache_size <= 1048576)
1675 					hw_copy_limit_8 = 8 *
1676 					    VIS_COPY_THRESHOLD;
1677 				else if (ecache_size <= 2097152)
1678 					hw_copy_limit_8 = 8 *
1679 					    VIS_COPY_THRESHOLD;
1680 				else if (ecache_size <= 4194304)
1681 					hw_copy_limit_8 = 8 *
1682 					    VIS_COPY_THRESHOLD;
1683 				else
1684 					hw_copy_limit_8 = 10 *
1685 					    VIS_COPY_THRESHOLD;
1686 				priv_hcl_8 = hw_copy_limit_8;
1687 				min_ecache_size = ecache_size;
1688 			}
1689 		}
1690 	}
1691 }
1692 
1693 #define	VIS_BLOCKSIZE		64
1694 
1695 int
1696 dtrace_blksuword32_err(uintptr_t addr, uint32_t *data)
1697 {
1698 	int ret, watched;
1699 
1700 	watched = watch_disable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE);
1701 	ret = dtrace_blksuword32(addr, data, 0);
1702 	if (watched)
1703 		watch_enable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE);
1704 
1705 	return (ret);
1706 }
1707 
1708 void
1709 opl_cpu_reg_init()
1710 {
1711 	uint64_t	this_cpu_log;
1712 
1713 	/*
1714 	 * We do not need to re-initialize cpu0 registers.
1715 	 */
1716 	if (cpu[getprocessorid()] == &cpu0)
1717 		return;
1718 
1719 	/*
1720 	 * Initialize Error log Scratch register for error handling.
1721 	 */
1722 
1723 	this_cpu_log = va_to_pa((void*)(((uint64_t)opl_err_log) +
1724 		ERRLOG_BUFSZ * (getprocessorid())));
1725 	opl_error_setup(this_cpu_log);
1726 
1727 	/*
1728 	 * Enable MMU translating multiple page sizes for
1729 	 * sITLB and sDTLB.
1730 	 */
1731 	opl_mpg_enable();
1732 }
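
/*
 * Sketch of the per-CPU error-log addressing used above: opl_err_log is
 * a contiguous array of ERRLOG_BUFSZ-byte buffers, one per CPU, from the
 * single ERRLOG_ALLOC_SZ allocation in cpu_error_init().  For CPU n:
 *
 *	paddr = va_to_pa((void *)((uint64_t)opl_err_log + ERRLOG_BUFSZ * n));
 *	opl_error_setup(paddr);		(record paddr in the scratch reg)
 *
 * cpu0 is special-cased above because its log (opl_cpu0_err_log) was
 * already registered in cpu_setup().
 */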
1733 
1734 /*
1735  * Queue one event in ue_queue based on ecc_type_to_info entry.
1736  */
1737 static void
1738 cpu_queue_one_event(opl_async_flt_t *opl_flt, char *reason,
1739     ecc_type_to_info_t *eccp)
1740 {
1741 	struct async_flt *aflt = (struct async_flt *)opl_flt;
1742 
1743 	if (reason &&
1744 	    strlen(reason) + strlen(eccp->ec_reason) < MAX_REASON_STRING) {
1745 		(void) strcat(reason, eccp->ec_reason);
1746 	}
1747 
1748 	opl_flt->flt_bit = eccp->ec_afsr_bit;
1749 	opl_flt->flt_type = eccp->ec_flt_type;
1750 	aflt->flt_in_memory = cpu_flt_in_memory(opl_flt, opl_flt->flt_bit);
1751 	aflt->flt_payload = eccp->ec_err_payload;
1752 
1753 	ASSERT(aflt->flt_status & (OPL_ECC_SYNC_TRAP|OPL_ECC_URGENT_TRAP));
1754 	cpu_errorq_dispatch(eccp->ec_err_class,
1755 		(void *)opl_flt, sizeof (opl_async_flt_t),
1756 		ue_queue,
1757 		aflt->flt_panic);
1758 }
1759 
1760 /*
1761  * Queue events on async event queue one event per error bit.
1762  * Return number of events queued.
1763  */
1764 int
1765 cpu_queue_events(opl_async_flt_t *opl_flt, char *reason, uint64_t t_afsr_errs)
1766 {
1767 	struct async_flt *aflt = (struct async_flt *)opl_flt;
1768 	ecc_type_to_info_t *eccp;
1769 	int nevents = 0;
1770 
1771 	/*
1772 	 * Queue expected errors; error bit and fault type must match
1773 	 * in the ecc_type_to_info table.
1774 	 */
1775 	for (eccp = ecc_type_to_info; t_afsr_errs != 0 && eccp->ec_desc != NULL;
1776 		eccp++) {
1777 		if ((eccp->ec_afsr_bit & t_afsr_errs) != 0 &&
1778 		    (eccp->ec_flags & aflt->flt_status) != 0) {
1779 			/*
1780 			 * UE error event can be further
1781 			 * classified/breakdown into finer granularity
1782 			 * based on the flt_eid_mod value set by HW.  We do
1783 			 * special handling here so that we can report UE
1784 			 * error in finer granularity as ue_mem,
1785 			 * ue_channel, ue_cpu or ue_path.
1786 			 */
1787 			if (eccp->ec_flt_type == OPL_CPU_SYNC_UE) {
1788 				opl_flt->flt_eid_mod =
1789 					(aflt->flt_stat & SFSR_EID_MOD)
1790 					>> SFSR_EID_MOD_SHIFT;
1791 				opl_flt->flt_eid_sid =
1792 					(aflt->flt_stat & SFSR_EID_SID)
1793 					>> SFSR_EID_SID_SHIFT;
1794 				/*
1795 				 * Need to advance eccp pointer by flt_eid_mod
1796 				 * so that we get an appropriate ecc pointer
1797 				 *
1798 				 * EID			# of advances
1799 				 * ----------------------------------
1800 				 * OPL_ERRID_MEM	0
1801 				 * OPL_ERRID_CHANNEL	1
1802 				 * OPL_ERRID_CPU	2
1803 				 * OPL_ERRID_PATH	3
1804 				 */
1805 				eccp += opl_flt->flt_eid_mod;
1806 			}
1807 			cpu_queue_one_event(opl_flt, reason, eccp);
1808 			t_afsr_errs &= ~eccp->ec_afsr_bit;
1809 			nevents++;
1810 		}
1811 	}
1812 
1813 	return (nevents);
1814 }
1815 
1816 /*
1817  * Sync. error wrapper functions.
1818  * We use these functions to transfer, from the nucleus trap
1819  * handler, information about the trap type (data or
1820  * instruction) and trap level (0 or above 0). This way we
1821  * avoid using SFSR's reserved bits.
1822  */
1823 
1824 #define	OPL_SYNC_TL0	0
1825 #define	OPL_SYNC_TL1	1
1826 #define	OPL_ISYNC_ERR	0
1827 #define	OPL_DSYNC_ERR	1
1828 
1829 void
1830 opl_cpu_isync_tl0_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
1831 {
1832 	uint64_t t_sfar = p_sfar;
1833 	uint64_t t_sfsr = p_sfsr;
1834 
1835 	opl_cpu_sync_error(rp, t_sfar, t_sfsr,
1836 	    OPL_SYNC_TL0, OPL_ISYNC_ERR);
1837 }
1838 
1839 void
1840 opl_cpu_isync_tl1_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
1841 {
1842 	uint64_t t_sfar = p_sfar;
1843 	uint64_t t_sfsr = p_sfsr;
1844 
1845 	opl_cpu_sync_error(rp, t_sfar, t_sfsr,
1846 	    OPL_SYNC_TL1, OPL_ISYNC_ERR);
1847 }
1848 
1849 void
1850 opl_cpu_dsync_tl0_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
1851 {
1852 	uint64_t t_sfar = p_sfar;
1853 	uint64_t t_sfsr = p_sfsr;
1854 
1855 	opl_cpu_sync_error(rp, t_sfar, t_sfsr,
1856 	    OPL_SYNC_TL0, OPL_DSYNC_ERR);
1857 }
1858 
1859 void
1860 opl_cpu_dsync_tl1_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
1861 {
1862 	uint64_t t_sfar = p_sfar;
1863 	uint64_t t_sfsr = p_sfsr;
1864 
1865 	opl_cpu_sync_error(rp, t_sfar, t_sfsr,
1866 	    OPL_SYNC_TL1, OPL_DSYNC_ERR);
1867 }
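
/*
 * Illustrative dispatch (a sketch, not the actual nucleus trap code):
 * the nucleus handler effectively selects one of the four wrappers
 * above, so that opl_cpu_sync_error() receives both dimensions
 * explicitly:
 *
 *	if (derr)
 *		tl ? opl_cpu_dsync_tl1_error(rp, sfar, sfsr) :
 *		    opl_cpu_dsync_tl0_error(rp, sfar, sfsr);
 *	else
 *		tl ? opl_cpu_isync_tl1_error(rp, sfar, sfsr) :
 *		    opl_cpu_isync_tl0_error(rp, sfar, sfsr);
 */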
1868 
1869 /*
1870  * The FJ synchronous error handler transfers control here for UE, BERR,
1871  * TO, TLB_MUL and TLB_PRT errors.
1872  * This function is modeled on cpu_deferred_error().
1873  */
1874 
1875 static void
1876 opl_cpu_sync_error(struct regs *rp, ulong_t t_sfar, ulong_t t_sfsr,
1877     uint_t tl, uint_t derr)
1878 {
1879 	opl_async_flt_t opl_flt;
1880 	struct async_flt *aflt;
1881 	int trampolined = 0;
1882 	char pr_reason[MAX_REASON_STRING];
1883 	uint64_t log_sfsr;
1884 	int expected = DDI_FM_ERR_UNEXPECTED;
1885 	ddi_acc_hdl_t *hp;
1886 
1887 	/*
1888 	 * We need to look at p_flag to determine if the thread detected an
1889 	 * error while dumping core.  We can't grab p_lock here, but it's ok
1890 	 * because we just need a consistent snapshot and we know that everyone
1891 	 * else will store a consistent set of bits while holding p_lock.  We
1892 	 * don't have to worry about a race because SDOCORE is set once prior
1893 	 * to doing i/o from the process's address space and is never cleared.
1894 	 */
1895 	uint_t pflag = ttoproc(curthread)->p_flag;
1896 
1897 	pr_reason[0] = '\0';
1898 
1899 	/*
1900 	 * handle the specific error
1901 	 */
1902 	bzero(&opl_flt, sizeof (opl_async_flt_t));
1903 	aflt = (struct async_flt *)&opl_flt;
1904 	aflt->flt_id = gethrtime_waitfree();
1905 	aflt->flt_bus_id = getprocessorid();
1906 	aflt->flt_inst = CPU->cpu_id;
1907 	aflt->flt_stat = t_sfsr;
1908 	aflt->flt_addr = t_sfar;
1909 	aflt->flt_pc = (caddr_t)rp->r_pc;
1910 	aflt->flt_prot = (uchar_t)AFLT_PROT_NONE;
1911 	aflt->flt_class = (uchar_t)CPU_FAULT;
1912 	aflt->flt_priv = (uchar_t)
1913 	    (tl == 1 ? 1 : ((rp->r_tstate & TSTATE_PRIV) ? 1 : 0));
1914 	aflt->flt_tl = (uchar_t)tl;
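	/*
	 * Traps at TL > 0, forced-fatal test mode (aft_testfatal), and
	 * TLB multi-hit/parity errors are unconditionally fatal.
	 */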
1915 	aflt->flt_panic = (uchar_t)(tl != 0 || aft_testfatal != 0 ||
1916 	    (t_sfsr & (SFSR_TLB_MUL|SFSR_TLB_PRT)) != 0);
1917 	aflt->flt_core = (pflag & SDOCORE) ? 1 : 0;
1918 	aflt->flt_status = (derr) ? OPL_ECC_DSYNC_TRAP : OPL_ECC_ISYNC_TRAP;
1919 
1920 	/*
1921 	 * If SFSR.FV is not set, both SFSR and SFAR/SFPAR values are uncertain.
1922 	 * So ignore all the error bits to avoid mis-handling and force a
1923 	 * system panic.
1924 	 * We skip all the procedures below and go straight to the panic call.
1925 	 */
1926 	if (!(t_sfsr & SFSR_FV)) {
1927 		opl_flt.flt_type = OPL_CPU_INV_SFSR;
1928 		aflt->flt_panic = 1;
1929 		aflt->flt_payload = FM_EREPORT_PAYLOAD_SYNC;
1930 		cpu_errorq_dispatch(FM_EREPORT_CPU_INV_SFSR,
1931 		    (void *)&opl_flt, sizeof (opl_async_flt_t), ue_queue,
1932 		    aflt->flt_panic);
1933 		fm_panic("%sError(s)", "invalid SFSR");
1934 	}
1935 
1936 	/*
1937 	 * If either the UE or the MK_UE bit is off, this is not a valid
1938 	 * UE error.  In that case, clear the UE & MK_UE bits to prevent
1939 	 * mis-handling below.
1940 	 * aflt->flt_stat keeps the original bits as a reference.
1941 	 */
1942 	if ((t_sfsr & (SFSR_MK_UE|SFSR_UE)) !=
1943 	    (SFSR_MK_UE|SFSR_UE)) {
1944 		t_sfsr &= ~(SFSR_MK_UE|SFSR_UE);
1945 	}
1946 
1947 	/*
1948 	 * If the trap occurred in privileged mode at TL=0, we need to check to
1949 	 * see if we were executing in the kernel under on_trap() or t_lofault
1950 	 * protection.  If so, modify the saved registers so that we return
1951 	 * from the trap to the appropriate trampoline routine.
1952 	 */
1953 	if (!aflt->flt_panic && aflt->flt_priv && tl == 0) {
1954 		if (curthread->t_ontrap != NULL) {
1955 			on_trap_data_t *otp = curthread->t_ontrap;
1956 
1957 			if (otp->ot_prot & OT_DATA_EC) {
1958 				aflt->flt_prot = (uchar_t)AFLT_PROT_EC;
1959 				otp->ot_trap |= (ushort_t)OT_DATA_EC;
1960 				rp->r_pc = otp->ot_trampoline;
1961 				rp->r_npc = rp->r_pc + 4;
1962 				trampolined = 1;
1963 			}
1964 
1965 			if ((t_sfsr & (SFSR_TO | SFSR_BERR)) &&
1966 			    (otp->ot_prot & OT_DATA_ACCESS)) {
1967 				aflt->flt_prot = (uchar_t)AFLT_PROT_ACCESS;
1968 				otp->ot_trap |= (ushort_t)OT_DATA_ACCESS;
1969 				rp->r_pc = otp->ot_trampoline;
1970 				rp->r_npc = rp->r_pc + 4;
1971 				trampolined = 1;
1972 				/*
1973 				 * For peeks and caut_gets, errors are expected.
1974 				 */
1975 				hp = (ddi_acc_hdl_t *)otp->ot_handle;
1976 				if (!hp)
1977 					expected = DDI_FM_ERR_PEEK;
1978 				else if (hp->ah_acc.devacc_attr_access ==
1979 				    DDI_CAUTIOUS_ACC)
1980 					expected = DDI_FM_ERR_EXPECTED;
1981 			}
1982 
1983 		} else if (curthread->t_lofault) {
1984 			aflt->flt_prot = AFLT_PROT_COPY;
1985 			rp->r_g1 = EFAULT;
1986 			rp->r_pc = curthread->t_lofault;
1987 			rp->r_npc = rp->r_pc + 4;
1988 			trampolined = 1;
1989 		}
1990 	}
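
	/*
	 * For reference, a typical protected consumer that the code above
	 * trampolines for looks roughly like this (a sketch, not code from
	 * this file):
	 *
	 *	on_trap_data_t otd;
	 *
	 *	if (!on_trap(&otd, OT_DATA_ACCESS))
	 *		val = *(volatile uint64_t *)addr;	(may BERR/TO)
	 *	no_trap();
	 *
	 * A BERR/TO raised inside the protected window resumes at the
	 * trampoline recorded in the on_trap_data, with ot_trap noting
	 * which protection fired; that is the state inspected above.
	 */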
1991 
1992 	/*
1993 	 * If we're in user mode or we're doing a protected copy, we either
1994 	 * want the ASTON code below to send a signal to the user process
1995 	 * or we want to panic if aft_panic is set.
1996 	 *
1997 	 * If we're in privileged mode and we're not doing a copy, then we
1998 	 * need to check if we've trampolined.  If we haven't trampolined,
1999 	 * we should panic.
2000 	 */
2001 	if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
2002 		if (t_sfsr & (SFSR_ERRS & ~(SFSR_BERR | SFSR_TO)))
2003 			aflt->flt_panic |= aft_panic;
2004 	} else if (!trampolined) {
2005 		aflt->flt_panic = 1;
2006 	}
2007 
2008 	/*
2009 	 * If we've trampolined due to a privileged TO or BERR, or if an
2010 	 * unprivileged TO or BERR occurred, we don't want to enqueue an
2011 	 * event for that TO or BERR.  Queue all other events (if any) besides
2012 	 * the TO/BERR.
2013 	 */
2014 	log_sfsr = t_sfsr;
2015 	if (trampolined) {
2016 		log_sfsr &= ~(SFSR_TO | SFSR_BERR);
2017 	} else if (!aflt->flt_priv) {
2018 		/*
2019 		 * User mode, suppress messages if
2020 		 * cpu_berr_to_verbose is not set.
2021 		 */
2022 		if (!cpu_berr_to_verbose)
2023 			log_sfsr &= ~(SFSR_TO | SFSR_BERR);
2024 	}
2025 
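	/*
	 * If error bits remain to be logged but none of them matched an
	 * entry in ecc_type_to_info (or no valid error bits were present
	 * at all), report the SFSR itself as invalid.
	 */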
2026 	if (((log_sfsr & SFSR_ERRS) &&
2027 	    (cpu_queue_events(&opl_flt, pr_reason, t_sfsr) == 0)) ||
2028 	    ((t_sfsr & SFSR_ERRS) == 0)) {
2029 		opl_flt.flt_type = OPL_CPU_INV_SFSR;
2030 		aflt->flt_payload = FM_EREPORT_PAYLOAD_SYNC;
2031 		cpu_errorq_dispatch(FM_EREPORT_CPU_INV_SFSR,
2032 		    (void *)&opl_flt, sizeof (opl_async_flt_t), ue_queue,
2033 		    aflt->flt_panic);
2034 	}
2035 
2036 	if (t_sfsr & (SFSR_UE|SFSR_TO|SFSR_BERR)) {
2037 		cpu_run_bus_error_handlers(aflt, expected);
2038 	}
2039 
2040 	/*
2041 	 * Panic here if aflt->flt_panic has been set.  Enqueued errors will
2042 	 * be logged as part of the panic flow.
2043 	 */
2044 	if (aflt->flt_panic) {
2045 		if (pr_reason[0] == 0)
2046 			(void) strcpy(pr_reason, "invalid SFSR ");
2047 
2048 		fm_panic("%sError(s)", pr_reason);
2049 	}
2050 
2051 	/*
2052 	 * If we queued an error and we are going to return from the trap and
2053 	 * the error was in user mode or inside of a copy routine, set AST flag
2054 	 * so the queue will be drained before returning to user mode.  The
2055 	 * AST processing will also act on our failure policy.
2056 	 */
2057 	if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
2058 		int pcb_flag = 0;
2059 
2060 		if (t_sfsr & (SFSR_ERRS &
2061 		    ~(SFSR_BERR | SFSR_TO)))
2062 			pcb_flag |= ASYNC_HWERR;
2063 
2064 		if (t_sfsr & SFSR_BERR)
2065 			pcb_flag |= ASYNC_BERR;
2066 
2067 		if (t_sfsr & SFSR_TO)
2068 			pcb_flag |= ASYNC_BTO;
2069 
2070 		ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag;
2071 		aston(curthread);
2072 	}
2073 }
2074 
2075 /*ARGSUSED*/
2076 void
2077 opl_cpu_urgent_error(struct regs *rp, ulong_t p_ugesr, ulong_t tl)
2078 {
2079 	opl_async_flt_t opl_flt;
2080 	struct async_flt *aflt;
2081 	char pr_reason[MAX_REASON_STRING];
2082 
2083 	/* normalize tl */
2084 	tl = (tl >= 2 ? 1 : 0);
2085 	pr_reason[0] = '\0';
2086 
2087 	bzero(&opl_flt, sizeof (opl_async_flt_t));
2088 	aflt = (struct async_flt *)&opl_flt;
2089 	aflt->flt_id = gethrtime_waitfree();
2090 	aflt->flt_bus_id = getprocessorid();
2091 	aflt->flt_inst = CPU->cpu_id;
2092 	aflt->flt_stat = p_ugesr;
2093 	aflt->flt_pc = (caddr_t)rp->r_pc;
2094 	aflt->flt_class = (uchar_t)CPU_FAULT;
2095 	aflt->flt_tl = tl;
2096 	aflt->flt_priv = (uchar_t)
2097 	    (tl == 1 ? 1 : ((rp->r_tstate & TSTATE_PRIV) ? 1 : 0));
2098 	aflt->flt_status = OPL_ECC_URGENT_TRAP;
2099 	aflt->flt_panic = 1;
2100 	/*
2101 	 * HW does not set mod/sid for urgent errors,
2102 	 * so we have to set them here.
2103 	 */
2104 	opl_flt.flt_eid_mod = OPL_ERRID_CPU;
2105 	opl_flt.flt_eid_sid = aflt->flt_inst;
2106 
2107 	if (cpu_queue_events(&opl_flt, pr_reason, p_ugesr) == 0) {
2108 		opl_flt.flt_type = OPL_CPU_INV_UGESR;
2109 		aflt->flt_payload = FM_EREPORT_PAYLOAD_URGENT;
2110 		cpu_errorq_dispatch(FM_EREPORT_CPU_INV_URG,
2111 		    (void *)&opl_flt, sizeof (opl_async_flt_t),
2112 		    ue_queue, aflt->flt_panic);
2113 	}
2114 
2115 	fm_panic("Urgent Error");
2116 }
2117 
2118 /*
2119  * Initialize the periodic resetting of the error counters.
2120  */
2121 /* ARGSUSED */
2122 static void
2123 opl_ras_online(void *arg, cpu_t *cp, cyc_handler_t *hdlr, cyc_time_t *when)
2124 {
2125 	hdlr->cyh_func = (cyc_func_t)ras_cntr_reset;
2126 	hdlr->cyh_level = CY_LOW_LEVEL;
2127 	hdlr->cyh_arg = (void *)(uintptr_t)cp->cpu_id;
2128 
2129 	when->cyt_when = cp->cpu_id * (((hrtime_t)NANOSEC * 10)/ NCPU);
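	/*
	 * Stagger each CPU's first expiration across a 10-second window
	 * (cpu_id * 10s / NCPU) so the resets don't all fire at once;
	 * thereafter the cyclic repeats every opl_async_check_interval
	 * seconds.
	 */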
2130 	when->cyt_when = cp->cpu_id * (((hrtime_t)NANOSEC * 10) / NCPU);
2131 }
2132 
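/*
 * Register an omni-present cyclic so that the error counter reset
 * runs periodically on every CPU, including CPUs that come online
 * later; opl_ras_online() programs the per-CPU handler and timing.
 */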
2133 void
2134 cpu_mp_init(void)
2135 {
2136 	cyc_omni_handler_t hdlr;
2137 
2138 	hdlr.cyo_online = opl_ras_online;
2139 	hdlr.cyo_offline = NULL;
2140 	hdlr.cyo_arg = NULL;
2141 	mutex_enter(&cpu_lock);
2142 	(void) cyclic_add_omni(&hdlr);
2143 	mutex_exit(&cpu_lock);
2144 }
2145 
2146 /*ARGSUSED*/
2147 void
2148 mmu_init_kernel_pgsz(struct hat *hat)
2149 {
2150 }
2151 
2152 size_t
2153 mmu_get_kernel_lpsize(size_t lpsize)
2154 {
2155 	uint_t tte;
2156 
2157 	if (lpsize == 0) {
2158 		/* no setting for segkmem_lpsize in /etc/system: use default */
2159 		return (MMU_PAGESIZE4M);
2160 	}
2161 
2162 	for (tte = TTE8K; tte <= TTE4M; tte++) {
2163 		if (lpsize == TTEBYTES(tte))
2164 			return (lpsize);
2165 	}
2166 
2167 	return (TTEBYTES(TTE8K));
2168 }
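
/*
 * Example (illustrative): segkmem_lpsize=0x400000 in /etc/system is
 * accepted as-is since it matches TTEBYTES(TTE4M); an unsupported
 * value such as 0x200000 (2M) falls back to TTEBYTES(TTE8K), and an
 * unset (zero) value defaults to MMU_PAGESIZE4M.
 */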
2169 
2170 /*
2171  * The following functions are unused in the
2172  * OPL cpu module.  They are defined here only to
2173  * resolve dependencies in the "unix" module.
2174  * Unused functions that should never be called
2175  * on OPL are coded with ASSERT(0).
2176  */
2177 
2178 void
2179 cpu_disable_errors(void)
2180 {}
2181 
2182 void
2183 cpu_enable_errors(void)
2184 { ASSERT(0); }
2185 
2186 /*ARGSUSED*/
2187 void
2188 cpu_ce_scrub_mem_err(struct async_flt *ecc, boolean_t t)
2189 { ASSERT(0); }
2190 
2191 /*ARGSUSED*/
2192 void
2193 cpu_faulted_enter(struct cpu *cp)
2194 {}
2195 
2196 /*ARGSUSED*/
2197 void
2198 cpu_faulted_exit(struct cpu *cp)
2199 {}
2200 
2201 /*ARGSUSED*/
2202 void
2203 cpu_check_allcpus(struct async_flt *aflt)
2204 {}
2205 
2206 /*ARGSUSED*/
2207 void
2208 cpu_ce_log_err(struct async_flt *aflt, errorq_elem_t *t)
2209 { ASSERT(0); }
2210 
2211 /*ARGSUSED*/
2212 void
2213 cpu_check_ce(int flag, uint64_t pa, caddr_t va, uint_t psz)
2214 { ASSERT(0); }
2215 
2216 /*ARGSUSED*/
2217 void
2218 cpu_ce_count_unum(struct async_flt *ecc, int len, char *unum)
2219 { ASSERT(0); }
2220 
2221 /*ARGSUSED*/
2222 void
2223 cpu_busy_ecache_scrub(struct cpu *cp)
2224 {}
2225 
2226 /*ARGSUSED*/
2227 void
2228 cpu_idle_ecache_scrub(struct cpu *cp)
2229 {}
2230 
2231 /* ARGSUSED */
2232 void
2233 cpu_change_speed(uint64_t divisor, uint64_t arg2)
2234 { ASSERT(0); }
2235 
2236 void
2237 cpu_init_cache_scrub(void)
2238 {}
2239 
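/*
 * The next three routines hand off to optional platform hooks.  The
 * address-of test (e.g. &plat_get_mem_sid) is non-NULL only when the
 * platform actually supplies the (weak) symbol; otherwise we return
 * ENOTSUP.
 */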
2240 /* ARGSUSED */
2241 int
2242 cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
2243 {
2244 	if (&plat_get_mem_sid) {
2245 		return (plat_get_mem_sid(unum, buf, buflen, lenp));
2246 	} else {
2247 		return (ENOTSUP);
2248 	}
2249 }
2250 
2251 /* ARGSUSED */
2252 int
2253 cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
2254 {
2255 	if (&plat_get_mem_addr) {
2256 		return (plat_get_mem_addr(unum, sid, offset, addrp));
2257 	} else {
2258 		return (ENOTSUP);
2259 	}
2260 }
2261 
2262 /* ARGSUSED */
2263 int
2264 cpu_get_mem_offset(uint64_t flt_addr, uint64_t *offp)
2265 {
2266 	if (&plat_get_mem_offset) {
2267 		return (plat_get_mem_offset(flt_addr, offp));
2268 	} else {
2269 		return (ENOTSUP);
2270 	}
2271 }
2272 
2273 /*ARGSUSED*/
2274 void
2275 itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
2276 { ASSERT(0); }
2277 
2278 /*ARGSUSED*/
2279 void
2280 dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
2281 { ASSERT(0); }
2282 
2283 /*ARGSUSED*/
2284 void
2285 read_ecc_data(struct async_flt *aflt, short verbose, short ce_err)
2286 { ASSERT(0); }
2287 
2288 /*ARGSUSED*/
2289 int
2290 ce_scrub_xdiag_recirc(struct async_flt *aflt, errorq_t *eqp,
2291     errorq_elem_t *eqep, size_t afltoffset)
2292 {
2293 	ASSERT(0);
2294 	return (0);
2295 }
2296 
2297 /*ARGSUSED*/
2298 char *
2299 flt_to_error_type(struct async_flt *aflt)
2300 {
2301 	ASSERT(0);
2302 	return (NULL);
2303 }
2304