/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/machthread.h>
#include <sys/cpu.h>
#include <sys/cmp.h>
#include <sys/elf_SPARC.h>
#include <vm/vm_dep.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kpm.h>
#include <sys/cpuvar.h>
#include <sys/opl_olympus_regs.h>
#include <sys/opl_module.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/cpu_module.h>
#include <sys/intreg.h>
#include <sys/clock.h>
#include <sys/platform_module.h>
#include <sys/ontrap.h>
#include <sys/panic.h>
#include <sys/memlist.h>
#include <sys/ndifm.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/cpu/SPARC64-VI.h>
#include <sys/dtrace.h>
#include <sys/watchpoint.h>
#include <sys/promif.h>

/*
 * Internal functions.
 */
static int cpu_sync_log_err(void *flt);
static void cpu_payload_add_aflt(struct async_flt *, nvlist_t *, nvlist_t *);
static void opl_cpu_sync_error(struct regs *, ulong_t, ulong_t, uint_t, uint_t);
static int  cpu_flt_in_memory(opl_async_flt_t *, uint64_t);

/*
 * Interval at which the error counters are reset.
 */
static int opl_async_check_interval = 60;		/* 1 min */

uint_t cpu_impl_dual_pgsz = 1;

/*
 * PA[22:0] represents the displacement in Jupiter
 * configuration space.
 */
uint_t	root_phys_addr_lo_mask = 0x7fffffu;

/*
 * Set in /etc/system to control logging of user BERR/TO events.
 */
int cpu_berr_to_verbose = 0;

static int min_ecache_size;
static uint_t priv_hcl_1;
static uint_t priv_hcl_2;
static uint_t priv_hcl_4;
static uint_t priv_hcl_8;

/*
 * Olympus error log
 */
static opl_errlog_t	*opl_err_log;

/*
 * UE is classified into four classes (MEM, CHANNEL, CPU, PATH).
 * No other ecc_type_info insertion is allowed between the
 * following four UE classes.
 */
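/*
 * Each entry below is a positional ecc_type_to_info_t initializer; by
 * the accessors used elsewhere in this file, the columns correspond to
 * the error bit (ec_afsr_bit), reason string (ec_reason), trap-class
 * flags (ec_flags), fault type (ec_flt_type), description (ec_desc),
 * ereport payload type (ec_err_payload), and ereport class
 * (ec_err_class).
 */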
ecc_type_to_info_t ecc_type_to_info[] = {
	SFSR_UE,	"UE ",	(OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
	"Uncorrectable ECC",  FM_EREPORT_PAYLOAD_SYNC,
	FM_EREPORT_CPU_UE_MEM,
	SFSR_UE,	"UE ",	(OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
	"Uncorrectable ECC",  FM_EREPORT_PAYLOAD_SYNC,
	FM_EREPORT_CPU_UE_CHANNEL,
	SFSR_UE,	"UE ",	(OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
	"Uncorrectable ECC",  FM_EREPORT_PAYLOAD_SYNC,
	FM_EREPORT_CPU_UE_CPU,
	SFSR_UE,	"UE ",	(OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
	"Uncorrectable ECC",  FM_EREPORT_PAYLOAD_SYNC,
	FM_EREPORT_CPU_UE_PATH,
	SFSR_BERR, "BERR ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
	"Bus Error",  FM_EREPORT_PAYLOAD_SYNC,
	FM_EREPORT_CPU_BERR,
	SFSR_TO, "TO ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
	"Bus Timeout",  FM_EREPORT_PAYLOAD_SYNC,
	FM_EREPORT_CPU_BTO,
	SFSR_TLB_MUL, "TLB_MUL ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
	"TLB MultiHit",  FM_EREPORT_PAYLOAD_SYNC,
	FM_EREPORT_CPU_MTLB,
	SFSR_TLB_PRT, "TLB_PRT ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
	"TLB Parity",  FM_EREPORT_PAYLOAD_SYNC,
	FM_EREPORT_CPU_TLBP,

	UGESR_IAUG_CRE, "IAUG_CRE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IAUG CRE",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_CRE,
	UGESR_IAUG_TSBCTXT, "IAUG_TSBCTXT",
	OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IAUG TSBCTXT",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_TSBCTX,
	UGESR_IUG_TSBP, "IUG_TSBP", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IUG TSBP",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_TSBP,
	UGESR_IUG_PSTATE, "IUG_PSTATE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IUG PSTATE",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_PSTATE,
	UGESR_IUG_TSTATE, "IUG_TSTATE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IUG TSTATE",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_TSTATE,
	UGESR_IUG_F, "IUG_F", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IUG FREG",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_IUG_F,
	UGESR_IUG_R, "IUG_R", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IUG RREG",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_IUG_R,
	UGESR_AUG_SDC, "AUG_SDC", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"AUG SDC",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_SDC,
	UGESR_IUG_WDT, "IUG_WDT", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IUG WDT",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_WDT,
	UGESR_IUG_DTLB, "IUG_DTLB", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IUG DTLB",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_DTLB,
	UGESR_IUG_ITLB, "IUG_ITLB", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IUG ITLB",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_ITLB,
	UGESR_IUG_COREERR, "IUG_COREERR",
	OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"IUG COREERR",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_CORE,
	UGESR_MULTI_DAE, "MULTI_DAE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"MULTI DAE",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_DAE,
	UGESR_MULTI_IAE, "MULTI_IAE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"MULTI IAE",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_IAE,
	UGESR_MULTI_UGE, "MULTI_UGE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
	"MULTI UGE",  FM_EREPORT_PAYLOAD_URGENT,
	FM_EREPORT_CPU_UGE,
	0,		NULL,		0,		0,
	NULL,  0,	   0,
};

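/*
 * Optional platform hook for retrieving memory configuration details;
 * cpu_get_mem_info() returns ENOTSUP while this remains NULL.
 */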
int (*p2get_mem_info)(int synd_code, uint64_t paddr,
		uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
		int *segsp, int *banksp, int *mcidp);


/*
 * Set up trap handlers for the 0xA, 0x32, and 0x40 trap types.
 */
void
cpu_init_trap(void)
{
	OPL_SET_TRAP(tt0_iae, opl_serr_instr);
	OPL_SET_TRAP(tt1_iae, opl_serr_instr);
	OPL_SET_TRAP(tt0_dae, opl_serr_instr);
	OPL_SET_TRAP(tt1_dae, opl_serr_instr);
	OPL_SET_TRAP(tt0_asdat, opl_ugerr_instr);
	OPL_SET_TRAP(tt1_asdat, opl_ugerr_instr);
}

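/*
 * Return the integer value of the named PROM property on 'node', or
 * 'deflt' if the property is absent or not int-sized.
 */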
static int
getintprop(pnode_t node, char *name, int deflt)
{
	int	value;

	switch (prom_getproplen(node, name)) {
	case sizeof (int):
		(void) prom_getprop(node, name, (caddr_t)&value);
		break;

	default:
		value = deflt;
		break;
	}

	return (value);
}

/*
 * Set the magic constants of the implementation.
 */
/*ARGSUSED*/
void
cpu_fiximp(pnode_t dnode)
{
	int i, a;
	extern int vac_size, vac_shift;
	extern uint_t vac_mask;

	static struct {
		char	*name;
		int	*var;
		int	defval;
	} prop[] = {
		"l1-dcache-size", &dcache_size, OPL_DCACHE_SIZE,
		"l1-dcache-line-size", &dcache_linesize, OPL_DCACHE_LSIZE,
		"l1-icache-size", &icache_size, OPL_ICACHE_SIZE,
		"l1-icache-line-size", &icache_linesize, OPL_ICACHE_LSIZE,
		"l2-cache-size", &ecache_size, OPL_ECACHE_SIZE,
		"l2-cache-line-size", &ecache_alignsize, OPL_ECACHE_LSIZE,
		"l2-cache-associativity", &ecache_associativity, OPL_ECACHE_NWAY
	};

	for (i = 0; i < sizeof (prop) / sizeof (prop[0]); i++)
		*prop[i].var = getintprop(dnode, prop[i].name, prop[i].defval);

	ecache_setsize = ecache_size / ecache_associativity;

	vac_size = OPL_VAC_SIZE;
	vac_mask = MMU_PAGEMASK & (vac_size - 1);
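	/* Compute vac_shift = log2(vac_size) by counting shifts. */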
	i = 0; a = vac_size;
	while (a >>= 1)
		++i;
	vac_shift = i;
	shm_alignment = vac_size;
	vac = 1;
}

#ifdef	OLYMPUS_C_REV_B_ERRATA_XCALL
/*
 * Redefine IDSR_BN_SETS locally in OPL to 31 instead of the standard
 * value of 32.  This works around a problem in revision B of the
 * Olympus-C processor: it cannot handle more than 31 xcall broadcasts.
 */
#undef	IDSR_BN_SETS
#define	IDSR_BN_SETS    31
#endif	/* OLYMPUS_C_REV_B_ERRATA_XCALL */

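/*
 * Ship a mondo to each CPU in 'set', re-shipping to NACKed targets
 * and panicking if any target stays busy past xc_tick_limit.
 */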
void
send_mondo_set(cpuset_t set)
{
	int lo, busy, nack, shipped = 0;
	uint16_t i, cpuids[IDSR_BN_SETS];
	uint64_t idsr, nackmask = 0, busymask, curnack, curbusy;
	uint64_t starttick, endtick, tick, lasttick;
#if (NCPU > IDSR_BN_SETS)
	int index = 0;
	int ncpuids = 0;
#endif
#ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
	int bn_sets = IDSR_BN_SETS;
	uint64_t ver;

	ASSERT(NCPU > bn_sets);
#endif

	ASSERT(!CPUSET_ISNULL(set));
	starttick = lasttick = gettick();

#ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
	ver = ultra_getver();
	if (((ULTRA_VER_IMPL(ver)) == OLYMPUS_C_IMPL) &&
		((OLYMPUS_REV_MASK(ver)) == OLYMPUS_C_A))
		bn_sets = 1;
#endif

#if (NCPU <= IDSR_BN_SETS)
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			shipit(i, shipped);
			nackmask |= IDSR_NACK_BIT(shipped);
			cpuids[shipped++] = i;
			CPUSET_DEL(set, i);
			if (CPUSET_ISNULL(set))
				break;
		}
	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);
#else
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			ncpuids++;

			/*
			 * Ship only to the first (IDSR_BN_SETS) CPUs.  If we
			 * find we have shipped to more than (IDSR_BN_SETS)
			 * CPUs, set "index" to the highest numbered CPU in
			 * the set so we can ship to other CPUs a bit later on.
			 */
#ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
			if (shipped < bn_sets) {
#else
			if (shipped < IDSR_BN_SETS) {
#endif
				shipit(i, shipped);
				nackmask |= IDSR_NACK_BIT(shipped);
				cpuids[shipped++] = i;
				CPUSET_DEL(set, i);
				if (CPUSET_ISNULL(set))
					break;
			} else
				index = (int)i;
		}

	CPU_STATS_ADDQ(CPU, sys, xcalls, ncpuids);
#endif

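	/*
	 * Each mondo target owns a (nack, busy) bit pair in the IDSR;
	 * derive the busy mask from the nack mask built up above.
	 */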
	busymask = IDSR_NACK_TO_BUSY(nackmask);
	busy = nack = 0;
	endtick = starttick + xc_tick_limit;
	for (;;) {
		idsr = getidsr();
#if (NCPU <= IDSR_BN_SETS)
		if (idsr == 0)
			break;
#else
		if (idsr == 0 && shipped == ncpuids)
			break;
#endif
		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point.  Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
			cmn_err(CE_CONT, "send mondo timeout "
				"[%d NACK %d BUSY]\nIDSR 0x%"
				"" PRIx64 "  cpuids:", nack, busy, idsr);
#ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
			for (i = 0; i < bn_sets; i++) {
#else
			for (i = 0; i < IDSR_BN_SETS; i++) {
#endif
				if (idsr & (IDSR_NACK_BIT(i) |
				    IDSR_BUSY_BIT(i))) {
					cmn_err(CE_CONT, " 0x%x",
						cpuids[i]);
				}
			}
			cmn_err(CE_CONT, "\n");
			cmn_err(CE_PANIC, "send_mondo_set: timeout");
		}
		curnack = idsr & nackmask;
		curbusy = idsr & busymask;

#ifdef OLYMPUS_C_REV_B_ERRATA_XCALL
		/*
		 * Only proceed to send more xcalls if all the
		 * cpus in the previous IDSR_BN_SETS were completed.
		 */
		if (curbusy) {
			busy++;
			continue;
		}
#endif /* OLYMPUS_C_REV_B_ERRATA_XCALL */

#if (NCPU > IDSR_BN_SETS)
		if (shipped < ncpuids) {
			uint64_t cpus_left;
			uint16_t next = (uint16_t)index;

			cpus_left = ~(IDSR_NACK_TO_BUSY(curnack) | curbusy) &
			    busymask;

			if (cpus_left) {
				do {
					/*
					 * Sequence through and ship to the
					 * remainder of the CPUs in the system
					 * (e.g. other than the first
					 * (IDSR_BN_SETS)) in reverse order.
					 */
					lo = lowbit(cpus_left) - 1;
					i = IDSR_BUSY_IDX(lo);
					shipit(next, i);
					shipped++;
					cpuids[i] = next;

					/*
					 * If we've processed all the CPUs,
					 * exit the loop now and save
					 * instructions.
					 */
					if (shipped == ncpuids)
						break;

					for ((index = ((int)next - 1));
						index >= 0; index--)
						if (CPU_IN_SET(set, index)) {
							next = (uint16_t)index;
							break;
						}

					cpus_left &= ~(1ull << lo);
				} while (cpus_left);
				continue;
			}
		}
#endif
#ifndef	OLYMPUS_C_REV_B_ERRATA_XCALL
		if (curbusy) {
			busy++;
			continue;
		}
#endif	/* OLYMPUS_C_REV_B_ERRATA_XCALL */
#ifdef SEND_MONDO_STATS
		{
			int n = gettick() - starttick;
			if (n < 8192)
				x_nack_stimes[n >> 7]++;
		}
#endif
		while (gettick() < (tick + sys_clock_mhz))
			;
		do {
			lo = lowbit(curnack) - 1;
			i = IDSR_NACK_IDX(lo);
			shipit(cpuids[i], i);
			curnack &= ~(1ull << lo);
		} while (curnack);
		nack++;
		busy = 0;
	}
#ifdef SEND_MONDO_STATS
	{
		int n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else
			x_set_ltimes[(n >> 13) & 0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}

/*
 * Cpu private initialization.
 */
void
cpu_init_private(struct cpu *cp)
{
	if (!(IS_OLYMPUS_C(cpunodes[cp->cpu_id].implementation))) {
		cmn_err(CE_PANIC, "CPU%d Impl %d: Only SPARC64-VI is supported",
			cp->cpu_id, cpunodes[cp->cpu_id].implementation);
	}

	adjust_hw_copy_limits(cpunodes[cp->cpu_id].ecache_size);
}

void
cpu_setup(void)
{
	extern int at_flags;
	extern int disable_delay_tlb_flush, delay_tlb_flush;
	extern int cpc_has_overflow_intr;
	extern int disable_text_largepages;
	extern int use_text_pgsz4m;
	uint64_t cpu0_log;
	extern	 uint64_t opl_cpu0_err_log;

	/*
	 * Initialize Error log Scratch register for error handling.
	 */

	cpu0_log = va_to_pa(&opl_cpu0_err_log);
	opl_error_setup(cpu0_log);

	/*
	 * Enable the MMU to translate multiple page sizes for
	 * the sITLB and sDTLB.
	 */
	opl_mpg_enable();

	/*
	 * Set up chip-specific trap handlers.
	 */
	cpu_init_trap();

	cache |= (CACHE_VAC | CACHE_PTAG | CACHE_IOCOHERENT);

	at_flags = EF_SPARC_32PLUS | EF_SPARC_SUN_US1 | EF_SPARC_SUN_US3;

	/*
	 * Due to the number of entries in the fully-associative TLB,
	 * this may have to be tuned lower than on Spitfire.
	 */
	pp_slots = MIN(8, MAXPP_SLOTS);

	/*
	 * Block stores do not invalidate all pages of the d$;
	 * pagecopy et al. need virtual translations with virtual
	 * coloring taken into consideration.  prefetch/ldd will
	 * pollute the d$ on the load side.
	 */
	pp_consistent_coloring = PPAGE_STORE_VCOLORING | PPAGE_LOADS_POLLUTE;

	if (use_page_coloring) {
		do_pg_coloring = 1;
		if (use_virtual_coloring)
			do_virtual_coloring = 1;
	}

	isa_list =
	    "sparcv9+vis2 sparcv9+vis sparcv9 "
	    "sparcv8plus+vis2 sparcv8plus+vis sparcv8plus "
	    "sparcv8 sparcv8-fsmuld sparcv7 sparc";

	cpu_hwcap_flags = AV_SPARC_VIS | AV_SPARC_VIS2;

	/*
	 * On SPARC64-VI, there is no hole in the virtual address space.
	 */
	hole_start = hole_end = 0;

	/*
	 * The kpm mapping window.
	 * kpm_size:
	 *	The size of a single kpm range.
	 *	The overall size will be: kpm_size * vac_colors.
	 * kpm_vbase:
	 *	The virtual start address of the kpm range within the kernel
	 *	virtual address space. kpm_vbase has to be kpm_size aligned.
	 */
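	/* 2^47 bytes is 128TB, matching kpm_size_shift below. */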
	kpm_size = (size_t)(128ull * 1024 * 1024 * 1024 * 1024); /* 128TB */
	kpm_size_shift = 47;
	kpm_vbase = (caddr_t)0x8000000000000000ull; /* 8EB */
	kpm_smallpages = 1;

	/*
	 * The traptrace code uses either %tick or %stick for
	 * timestamping.  We have %stick so we can use it.
	 */
	traptrace_use_stick = 1;

	/*
	 * SPARC64-VI has a performance counter overflow interrupt.
	 */
	cpc_has_overflow_intr = 1;

	/*
	 * Use SPARC64-VI flush-all support.
	 */
	if (!disable_delay_tlb_flush)
		delay_tlb_flush = 1;

	/*
	 * Declare that this architecture/cpu combination does not support
	 * fpRAS.
	 */
	fpras_implemented = 0;

	/*
	 * Enable 4M pages to be used for mapping user text by default.  Don't
	 * use large pages for initialized data segments since we may not know
	 * at exec() time what should be the preferred large page size for DTLB
	 * programming.
	 */
	use_text_pgsz4m = 1;
	disable_text_largepages = (1 << TTE64K) | (1 << TTE512K) |
	    (1 << TTE32M) | (1 << TTE256M);
}

/*
 * Called by setcpudelay.
 */
void
cpu_init_tick_freq(void)
{
	/*
	 * For SPARC64-VI we want to use the system clock rate as
	 * the basis for low level timing, due to support of mixed
	 * speed CPUs and power management.
	 */
	if (system_clock_freq == 0)
		cmn_err(CE_PANIC, "setcpudelay: invalid system_clock_freq");

	sys_tick_freq = system_clock_freq;
}

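/*
 * Cross-call latency histograms, in ticks: the *_stimes[] arrays hold
 * 128-tick buckets for fast completions, the *_ltimes[] arrays
 * 8192-tick buckets for slower ones.
 */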
#ifdef SEND_MONDO_STATS
uint32_t x_one_stimes[64];
uint32_t x_one_ltimes[16];
uint32_t x_set_stimes[64];
uint32_t x_set_ltimes[16];
uint32_t x_set_cpus[NCPU];
uint32_t x_nack_stimes[64];
#endif

/*
 * Note: A version of this function is used by the debugger via the KDI,
 * and must be kept in sync with this version.  Any changes made to this
 * function to support new chips or to accommodate errata must also be
 * included in the KDI-specific version.  See us3_kdi.c.
 */
void
send_one_mondo(int cpuid)
{
	int busy, nack;
	uint64_t idsr, starttick, endtick, tick, lasttick;
	uint64_t busymask;

	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
	starttick = lasttick = gettick();
	shipit(cpuid, 0);
	endtick = starttick + xc_tick_limit;
	busy = nack = 0;
	busymask = IDSR_BUSY;
	for (;;) {
		idsr = getidsr();
		if (idsr == 0)
			break;

		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point.  Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
			cmn_err(CE_PANIC, "send mondo timeout "
				"(target 0x%x) [%d NACK %d BUSY]",
					cpuid, nack, busy);
		}

		if (idsr & busymask) {
			busy++;
			continue;
		}
		drv_usecwait(1);
		shipit(cpuid, 0);
		nack++;
		busy = 0;
	}
#ifdef SEND_MONDO_STATS
	{
		int n = gettick() - starttick;
		if (n < 8192)
			x_one_stimes[n >> 7]++;
		else
			x_one_ltimes[(n >> 13) & 0xf]++;
	}
#endif
}

/*
 * init_mmu_page_sizes is set to one after the bootup time initialization
 * via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a
 * valid value.
 *
 * mmu_disable_ism_large_pages and mmu_disable_large_pages are the mmu-specific
 * versions of disable_ism_large_pages and disable_large_pages, and feed back
 * into those two hat variables at hat initialization time.
 */
int init_mmu_page_sizes = 0;
static int mmu_disable_ism_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static int mmu_disable_auto_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static int mmu_disable_large_pages = 0;

/*
 * Re-initialize mmu_page_sizes and friends, for SPARC64-VI mmu support.
 * Called during very early bootup from check_cpus_set().
 * Can be called to verify that mmu_page_sizes are set up correctly.
 *
 * Set Olympus defaults. We do not use the function parameter.
 */
/*ARGSUSED*/
int
mmu_init_mmu_page_sizes(int32_t not_used)
{
	if (!init_mmu_page_sizes) {
		mmu_page_sizes = MMU_PAGE_SIZES;
		mmu_hashcnt = MAX_HASHCNT;
		mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;
		auto_lpg_maxszc = TTE4M;
		mmu_exported_pagesize_mask = (1 << TTE8K) |
		    (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) |
		    (1 << TTE32M) | (1 << TTE256M);
		init_mmu_page_sizes = 1;
		return (0);
	}
	return (1);
}

/* SPARC64-VI worst case DTLB parameters */
#ifndef	LOCKED_DTLB_ENTRIES
#define	LOCKED_DTLB_ENTRIES	5	/* 2 user TSBs, 2 nucleus, + OBP */
#endif
#define	TOTAL_DTLB_ENTRIES	32
#define	AVAIL_32M_ENTRIES	0
#define	AVAIL_256M_ENTRIES	0
#define	AVAIL_DTLB_ENTRIES	(TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES)
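/*
 * With 32 total entries and 5 locked, 27 remain available; every page
 * size below shares that same threshold.
 */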
static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = {
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES};

/*
 * The function returns the mmu-specific values for the
 * hat's disable_large_pages, disable_ism_large_pages, and
 * disable_auto_large_pages variables.
 */
int
mmu_large_pages_disabled(uint_t flag)
{
	int pages_disable = 0;

	if (flag == HAT_LOAD) {
		pages_disable = mmu_disable_large_pages;
	} else if (flag == HAT_LOAD_SHARE) {
		pages_disable = mmu_disable_ism_large_pages;
	} else if (flag == HAT_LOAD_AUTOLPG) {
		pages_disable = mmu_disable_auto_large_pages;
	}
	return (pages_disable);
}

/*
 * mmu_init_large_pages is called with the desired ism_pagesize parameter.
 * It may be called from set_platform_defaults, if some value other than 32M
 * is desired.  mmu_ism_pagesize is the tunable.  If it has a bad value,
 * then only warn, since it would be bad form to panic due to a user typo.
 *
 * The function re-initializes the mmu_disable_ism_large_pages variable.
 */
void
mmu_init_large_pages(size_t ism_pagesize)
{
	switch (ism_pagesize) {
	case MMU_PAGESIZE4M:
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
		mmu_disable_auto_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
		auto_lpg_maxszc = TTE4M;
		break;
	case MMU_PAGESIZE32M:
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE256M));
		mmu_disable_auto_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE4M) | (1 << TTE256M));
		auto_lpg_maxszc = TTE32M;
		break;
	case MMU_PAGESIZE256M:
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M));
		mmu_disable_auto_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE4M) | (1 << TTE32M));
		auto_lpg_maxszc = TTE256M;
		break;
	default:
		cmn_err(CE_WARN, "Unrecognized mmu_ism_pagesize value 0x%lx",
		    ism_pagesize);
		break;
	}
}

/*
 * Function to reprogram the TLBs when page sizes used
 * by a process change significantly.
 */
void
mmu_setup_page_sizes(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
{
	uint8_t pgsz0, pgsz1;

	/*
	 * Don't program the 2nd dtlb for the kernel and ism hat.
	 */
	ASSERT(hat->sfmmu_ismhat == NULL);
	ASSERT(hat != ksfmmup);

	/*
	 * hat->sfmmu_pgsz[] is an array whose elements
	 * contain a sorted order of page sizes.  Element
	 * 0 is the most commonly used page size, followed
	 * by element 1, and so on.
	 *
	 * ttecnt[] is an array of per-page-size page counts
	 * mapped into the process.
	 *
	 * If the HAT's choice for page sizes is unsuitable,
	 * we can override it here.  The new values written
	 * to the array will be handed back to us later to
	 * do the actual programming of the TLB hardware.
	 */
	pgsz0 = (uint8_t)MIN(tmp_pgsz[0], tmp_pgsz[1]);
	pgsz1 = (uint8_t)MAX(tmp_pgsz[0], tmp_pgsz[1]);

	/*
	 * This implements PAGESIZE programming of the sTLB
	 * if large TTE counts don't exceed the thresholds.
	 */
	if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
		pgsz0 = page_szc(MMU_PAGESIZE);
	if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
		pgsz1 = page_szc(MMU_PAGESIZE);
	tmp_pgsz[0] = pgsz0;
	tmp_pgsz[1] = pgsz1;
	/* otherwise, accept what the HAT chose for us */
}

/*
 * The HAT calls this function when an MMU context is allocated so that we
 * can reprogram the large TLBs appropriately for the new process using
 * the context.
 *
 * The caller must hold the HAT lock.
 */
void
mmu_set_ctx_page_sizes(struct hat *hat)
{
	uint8_t pgsz0, pgsz1;
	uint8_t new_cext;

	ASSERT(sfmmu_hat_lock_held(hat));
	/*
	 * Don't program the 2nd dtlb for the kernel and ism hat.
	 */
	if (hat->sfmmu_ismhat || hat == ksfmmup)
		return;

	/*
	 * If supported, reprogram the TLBs to a larger pagesize.
	 */
	pgsz0 = hat->sfmmu_pgsz[0];
	pgsz1 = hat->sfmmu_pgsz[1];
	ASSERT(pgsz0 < mmu_page_sizes);
	ASSERT(pgsz1 < mmu_page_sizes);
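	/*
	 * Pack the two preferred page-size codes into a single
	 * context-extension (cext) value.
	 */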
	new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
	if (hat->sfmmu_cext != new_cext) {
#ifdef DEBUG
		int i;
		/*
		 * The cnum must be invalid here: the page size can only
		 * be changed after a process's contexts are invalidated.
		 */
		for (i = 0; i < max_mmu_ctxdoms; i++) {
			ASSERT(hat->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
		}
#endif /* DEBUG */
		hat->sfmmu_cext = new_cext;
	}
	/*
	 * sfmmu_setctx_sec() will take care of the
	 * rest of the dirty work for us.
	 */
}

/*
 * This function assumes that there are either four or six supported page
 * sizes and at most two programmable TLBs, so we need to decide which
 * page sizes are most important and then adjust the TLB page sizes
 * accordingly (if supported).
 *
 * If these assumptions change, this function will need to be
 * updated to support whatever the new limits are.
 */
void
mmu_check_page_sizes(sfmmu_t *sfmmup, uint64_t *ttecnt)
{
	uint64_t sortcnt[MMU_PAGE_SIZES];
	uint8_t tmp_pgsz[MMU_PAGE_SIZES];
	uint8_t i, j, max;
	uint16_t oldval, newval;

	/*
	 * We only consider reprogramming the TLBs if one or more of
	 * the two most used page sizes changes and we're using
	 * large pages in this process.
	 */
	if (sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) {
		/* Sort page sizes. */
		for (i = 0; i < mmu_page_sizes; i++) {
			sortcnt[i] = ttecnt[i];
		}
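		/*
		 * Selection sort on the scratch copy: each pass picks the
		 * page size with the largest remaining count.
		 */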
		for (j = 0; j < mmu_page_sizes; j++) {
			for (i = mmu_page_sizes - 1, max = 0; i > 0; i--) {
				if (sortcnt[i] > sortcnt[max])
					max = i;
			}
			tmp_pgsz[j] = max;
			sortcnt[max] = 0;
		}

		oldval = sfmmup->sfmmu_pgsz[0] << 8 | sfmmup->sfmmu_pgsz[1];

		mmu_setup_page_sizes(sfmmup, ttecnt, tmp_pgsz);

		/* Check 2 largest values after the sort. */
		newval = tmp_pgsz[0] << 8 | tmp_pgsz[1];
		if (newval != oldval) {
			sfmmu_reprog_pgsz_arr(sfmmup, tmp_pgsz);
		}
	}
}

/*
 * Return the size of the processor-specific async error
 * structure used.
 */
int
cpu_aflt_size(void)
{
	return (sizeof (opl_async_flt_t));
}

/*
 * The cpu_sync_log_err() function is called via the [uc]e_drain() function to
 * post-process CPU events that are dequeued.  As such, it can be invoked
 * from softint context, from AST processing in the trap() flow, or from the
 * panic flow.  We decode the CPU-specific data, and take appropriate actions.
 * Historically this entry point was used to log the actual cmn_err(9F) text;
 * now with FMA it is used to prepare 'flt' to be converted into an ereport.
 * With FMA this function now also returns a flag which indicates to the
 * caller whether the ereport should be posted (1) or suppressed (0).
 */
/*ARGSUSED*/
static int
cpu_sync_log_err(void *flt)
{
	opl_async_flt_t *opl_flt = (opl_async_flt_t *)flt;
	struct async_flt *aflt = (struct async_flt *)flt;

	/*
	 * No extra processing of urgent error events.
	 * Always generate ereports for these events.
	 */
	if (aflt->flt_status == OPL_ECC_URGENT_TRAP)
		return (1);

	/*
	 * Additional processing for synchronous errors.
	 */
	switch (opl_flt->flt_type) {
	case OPL_CPU_INV_SFSR:
		return (1);

	case OPL_CPU_SYNC_UE:
		/*
		 * The validity (SFSR_MK_UE) bit has already been checked
		 * in opl_cpu_sync_error(); no further check is required.
		 *
		 * opl_flt->flt_eid_mod and flt_eid_sid have been set by H/W,
		 * and they have been retrieved in cpu_queue_events().
		 */

		if (opl_flt->flt_eid_mod == OPL_ERRID_MEM) {
			ASSERT(aflt->flt_in_memory);
			/*
			 * We want to skip logging only if ALL the following
			 * conditions are true:
			 *
			 *	1. We are not panicking already.
			 *	2. The error is a memory error.
			 *	3. There is only one error.
			 *	4. The error is on a retired page.
			 *	5. The error occurred under on_trap
			 *	protection AFLT_PROT_EC.
			 */
			if (!panicstr && aflt->flt_prot == AFLT_PROT_EC &&
			    page_retire_check(aflt->flt_addr, NULL) == 0) {
				/*
				 * Do not log an error from
				 * the retired page.
				 */
				softcall(ecc_page_zero, (void *)aflt->flt_addr);
				return (0);
			}
			if (!panicstr)
				cpu_page_retire(opl_flt);
		}
		return (1);

	case OPL_CPU_SYNC_OTHERS:
		/*
		 * For the following error cases, the processor HW does
		 * not set the flt_eid_mod/flt_eid_sid. Instead, SW will attempt
		 * to assign appropriate values here to reflect what we
		 * think is the most likely cause of the problem with respect
		 * to the particular error event.  For bus error and timeout
		 * error events, we will assign OPL_ERRID_CHANNEL as the
		 * most likely reason.  For TLB parity or multiple hit
		 * error events, we will assign the reason as
		 * OPL_ERRID_CPU (cpu related problem) and set the
		 * flt_eid_sid to point to the cpuid.
		 */

		if (opl_flt->flt_bit & (SFSR_BERR|SFSR_TO)) {
			/*
			 * flt_eid_sid will not be used for this case.
			 */
			opl_flt->flt_eid_mod = OPL_ERRID_CHANNEL;
		}
		if (opl_flt->flt_bit & (SFSR_TLB_MUL|SFSR_TLB_PRT)) {
			opl_flt->flt_eid_mod = OPL_ERRID_CPU;
			opl_flt->flt_eid_sid = aflt->flt_inst;
		}

		/*
		 * In case no effective error bit is set.
		 */
		if ((opl_flt->flt_bit & SFSR_ERRS) == 0) {
			opl_flt->flt_eid_mod = OPL_ERRID_CPU;
			opl_flt->flt_eid_sid = aflt->flt_inst;
		}
		break;

	default:
		return (1);
	}
	return (1);
}

/*
 * Retire the bad page that may contain the flushed error.
 */
void
cpu_page_retire(opl_async_flt_t *opl_flt)
{
	struct async_flt *aflt = (struct async_flt *)opl_flt;
	(void) page_retire(aflt->flt_addr, PR_UE);
}

/*
 * Invoked by error_init() early in startup and therefore before
 * startup_errorq() is called to drain any error Q -
 *
 * startup()
 *   startup_end()
 *     error_init()
 *       cpu_error_init()
 * errorq_init()
 *   errorq_drain()
 * start_other_cpus()
 *
 * The purpose of this routine is to create error-related taskqs.  Taskqs
 * are used for this purpose because cpu_lock can't be grabbed from interrupt
 * context.
 */
/*ARGSUSED*/
void
cpu_error_init(int items)
{
	opl_err_log = (opl_errlog_t *)
	    kmem_alloc(ERRLOG_ALLOC_SZ, KM_SLEEP);
	if ((uint64_t)opl_err_log & MMU_PAGEOFFSET)
		cmn_err(CE_PANIC, "The base address of the error log "
		    "is not page aligned");
}

/*
 * We route all errors through a single switch statement.
 */
void
cpu_ue_log_err(struct async_flt *aflt)
{
	switch (aflt->flt_class) {
	case CPU_FAULT:
		if (cpu_sync_log_err(aflt))
			cpu_ereport_post(aflt);
		break;

	case BUS_FAULT:
		bus_async_log_err(aflt);
		break;

	default:
		cmn_err(CE_WARN, "discarding async error %p with invalid "
		    "fault class (0x%x)", (void *)aflt, aflt->flt_class);
		return;
	}
}

/*
 * Routine for panic hook callback from panic_idle().
 *
 * Nothing to do here.
 */
void
cpu_async_panic_callb(void)
{
}

/*
 * Routine to return a string identifying the physical name
 * associated with a memory/cache error.
 */
/*ARGSUSED*/
int
cpu_get_mem_unum(int synd_status, ushort_t flt_synd, uint64_t flt_stat,
    uint64_t flt_addr, int flt_bus_id, int flt_in_memory,
    ushort_t flt_status, char *buf, int buflen, int *lenp)
{
	int synd_code;
	int ret;

	/*
	 * An AFSR of -1 defaults to a memory syndrome.
	 */
	synd_code = (int)flt_synd;

	if (&plat_get_mem_unum) {
		if ((ret = plat_get_mem_unum(synd_code, flt_addr, flt_bus_id,
			flt_in_memory, flt_status, buf, buflen, lenp)) != 0) {
			buf[0] = '\0';
			*lenp = 0;
		}
		return (ret);
	}
	buf[0] = '\0';
	*lenp = 0;
	return (ENOTSUP);
}

/*
 * Wrapper for cpu_get_mem_unum() routine that takes an
 * async_flt struct rather than explicit arguments.
 */
int
cpu_get_mem_unum_aflt(int synd_status, struct async_flt *aflt,
    char *buf, int buflen, int *lenp)
{
	/*
	 * We always pass -1 so that cpu_get_mem_unum will interpret this as a
	 * memory error.
	 */
	return (cpu_get_mem_unum(synd_status, aflt->flt_synd,
	    (uint64_t)-1,
	    aflt->flt_addr, aflt->flt_bus_id, aflt->flt_in_memory,
	    aflt->flt_status, buf, buflen, lenp));
}

/*
 * This routine is a more generic interface to cpu_get_mem_unum()
 * that may be used by other modules (e.g. mm).
 */
/*ARGSUSED*/
int
cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
    char *buf, int buflen, int *lenp)
{
	int synd_status, flt_in_memory, ret;
	ushort_t flt_status = 0;
	char unum[UNUM_NAMLEN];

	/*
	 * Check for an invalid address.
	 */
	if (afar == (uint64_t)-1)
		return (ENXIO);

	if (synd == (uint64_t)-1)
		synd_status = AFLT_STAT_INVALID;
	else
		synd_status = AFLT_STAT_VALID;

	flt_in_memory = (*afsr & SFSR_MEMORY) &&
		pf_is_memory(afar >> MMU_PAGESHIFT);

	ret = cpu_get_mem_unum(synd_status, (ushort_t)synd, *afsr, afar,
		CPU->cpu_id, flt_in_memory, flt_status, unum,
		UNUM_NAMLEN, lenp);
	if (ret != 0)
		return (ret);

	if (*lenp >= buflen)
		return (ENAMETOOLONG);

	(void) strncpy(buf, unum, buflen);

	return (0);
}

/*
 * Routine to return memory information associated
 * with a physical address and syndrome.
 */
/*ARGSUSED*/
int
cpu_get_mem_info(uint64_t synd, uint64_t afar,
    uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
    int *segsp, int *banksp, int *mcidp)
{
	int synd_code = (int)synd;

	if (afar == (uint64_t)-1)
		return (ENXIO);

	if (p2get_mem_info != NULL)
		return ((p2get_mem_info)(synd_code, afar,
			mem_sizep, seg_sizep, bank_sizep,
			segsp, banksp, mcidp));
	else
		return (ENOTSUP);
}

/*
 * Routine to return a string identifying the physical
 * name associated with a cpuid.
 */
int
cpu_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int ret;
	char unum[UNUM_NAMLEN];

	if (&plat_get_cpu_unum) {
		if ((ret = plat_get_cpu_unum(cpuid, unum, UNUM_NAMLEN, lenp))
			!= 0)
			return (ret);
	} else {
		return (ENOTSUP);
	}

	if (*lenp >= buflen)
		return (ENAMETOOLONG);

	(void) strncpy(buf, unum, *lenp);

	return (0);
}

/*
 * This routine exports the name buffer size.
 */
size_t
cpu_get_name_bufsize()
{
	return (UNUM_NAMLEN);
}

/*
 * Flush the entire ecache by ASI_L2_CNTL.U2_FLUSH
 */
void
cpu_flush_ecache(void)
{
	flush_ecache(ecache_flushaddr, cpunodes[CPU->cpu_id].ecache_size,
	    cpunodes[CPU->cpu_id].ecache_linesize);
}

static uint8_t
flt_to_trap_type(struct async_flt *aflt)
{
	if (aflt->flt_status & OPL_ECC_ISYNC_TRAP)
		return (TRAP_TYPE_ECC_I);
	if (aflt->flt_status & OPL_ECC_DSYNC_TRAP)
		return (TRAP_TYPE_ECC_D);
	if (aflt->flt_status & OPL_ECC_URGENT_TRAP)
		return (TRAP_TYPE_URGENT);
	return (-1);
}

/*
 * Encode the data saved in the opl_async_flt_t struct into
 * the FM ereport payload.
 */
/* ARGSUSED */
static void
cpu_payload_add_aflt(struct async_flt *aflt, nvlist_t *payload,
		nvlist_t *resource)
{
	opl_async_flt_t *opl_flt = (opl_async_flt_t *)aflt;
	char unum[UNUM_NAMLEN];
	char sbuf[21]; /* sizeof (UINT64_MAX) + '\0' */
	int len;

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SFSR) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SFSR,
			DATA_TYPE_UINT64, aflt->flt_stat, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SFAR) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SFAR,
			DATA_TYPE_UINT64, aflt->flt_addr, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_UGESR) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UGESR,
			DATA_TYPE_UINT64, aflt->flt_stat, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PC) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PC,
		    DATA_TYPE_UINT64, (uint64_t)aflt->flt_pc, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TL) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TL,
		    DATA_TYPE_UINT8, (uint8_t)aflt->flt_tl, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TT) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TT,
		    DATA_TYPE_UINT8, flt_to_trap_type(aflt), NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PRIV) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PRIV,
		    DATA_TYPE_BOOLEAN_VALUE,
		    (aflt->flt_priv ? B_TRUE : B_FALSE), NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_FLT_STATUS) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FLT_STATUS,
			DATA_TYPE_UINT64, (uint64_t)aflt->flt_status, NULL);
	}

	switch (opl_flt->flt_eid_mod) {
	case OPL_ERRID_CPU:
		(void) snprintf(sbuf, sizeof (sbuf), "%llX",
			(u_longlong_t)cpunodes[opl_flt->flt_eid_sid].device_id);
		(void) fm_fmri_cpu_set(resource, FM_CPU_SCHEME_VERSION,
			NULL, opl_flt->flt_eid_sid,
			(uint8_t *)&cpunodes[opl_flt->flt_eid_sid].version,
			sbuf);
		fm_payload_set(payload,
			FM_EREPORT_PAYLOAD_NAME_RESOURCE,
			DATA_TYPE_NVLIST, resource, NULL);
		break;

	case OPL_ERRID_CHANNEL:
		/*
		 * No resource is created, but the cpumem DE will find
		 * the defective path by retrieving the EID from the SFSR,
		 * which is included in the payload.
		 */
		break;

	case OPL_ERRID_MEM:
		(void) cpu_get_mem_unum_aflt(0, aflt, unum, UNUM_NAMLEN, &len);
		(void) fm_fmri_mem_set(resource, FM_MEM_SCHEME_VERSION,
			NULL, unum, NULL, (uint64_t)-1);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
			DATA_TYPE_NVLIST, resource, NULL);
		break;

	case OPL_ERRID_PATH:
		/*
		 * No resource is created, but the cpumem DE will find
		 * the defective path by retrieving the EID from the SFSR,
		 * which is included in the payload.
		 */
		break;
	}
}

/*
 * Returns whether the fault address is valid for this error bit and
 * whether the address is "in memory" (i.e. pf_is_memory returns 1).
 */
/*ARGSUSED*/
static int
cpu_flt_in_memory(opl_async_flt_t *opl_flt, uint64_t t_afsr_bit)
{
	struct async_flt *aflt = (struct async_flt *)opl_flt;

	if (aflt->flt_status & (OPL_ECC_SYNC_TRAP)) {
		return ((t_afsr_bit & SFSR_MEMORY) &&
		    pf_is_memory(aflt->flt_addr >> MMU_PAGESHIFT));
	}
	return (0);
}

/*
 * In OPL, the SCF does the stick synchronization.
 */
void
sticksync_slave(void)
{
}

/*
 * In OPL, the SCF does the stick synchronization.
 */
void
sticksync_master(void)
{
}

/*
 * Cpu private uninitialization.  OPL cpus do not use the private area.
 */
void
cpu_uninit_private(struct cpu *cp)
{
	cmp_delete_cpu(cp->cpu_id);
}

/*
 * Always flush an entire cache.
 */
void
cpu_error_ecache_flush(void)
{
	cpu_flush_ecache();
}

void
cpu_ereport_post(struct async_flt *aflt)
{
	char *cpu_type, buf[FM_MAX_CLASS];
	nv_alloc_t *nva = NULL;
	nvlist_t *ereport, *detector, *resource;
	errorq_elem_t *eqep;
	char sbuf[21]; /* sizeof (UINT64_MAX) + '\0' */

	if (aflt->flt_panic || panicstr) {
		eqep = errorq_reserve(ereport_errorq);
		if (eqep == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		nva = errorq_elem_nva(ereport_errorq, eqep);
	} else {
		ereport = fm_nvlist_create(nva);
	}

	/*
	 * Create the scheme "cpu" FMRI.
	 */
	detector = fm_nvlist_create(nva);
	resource = fm_nvlist_create(nva);
	switch (cpunodes[aflt->flt_inst].implementation) {
	case OLYMPUS_C_IMPL:
		cpu_type = FM_EREPORT_CPU_SPARC64_VI;
		break;
	default:
		cpu_type = FM_EREPORT_CPU_UNSUPPORTED;
		break;
	}
	(void) snprintf(sbuf, sizeof (sbuf), "%llX",
	    (u_longlong_t)cpunodes[aflt->flt_inst].device_id);
	(void) fm_fmri_cpu_set(detector, FM_CPU_SCHEME_VERSION, NULL,
	    aflt->flt_inst, (uint8_t *)&cpunodes[aflt->flt_inst].version,
	    sbuf);

	/*
	 * Encode all the common data into the ereport.
	 */
	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s",
	    FM_ERROR_CPU, cpu_type, aflt->flt_erpt_class);

	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
	    fm_ena_generate(aflt->flt_id, FM_ENA_FMT1), detector, NULL);

	/*
	 * Encode the error specific data that was saved in
	 * the async_flt structure into the ereport.
	 */
	cpu_payload_add_aflt(aflt, ereport, resource);

	if (aflt->flt_panic || panicstr) {
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		fm_nvlist_destroy(detector, FM_NVA_FREE);
		fm_nvlist_destroy(resource, FM_NVA_FREE);
	}
}

void
cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
{
	int status;
	ddi_fm_error_t de;

	bzero(&de, sizeof (ddi_fm_error_t));

	de.fme_version = DDI_FME_VERSION;
	de.fme_ena = fm_ena_generate(aflt->flt_id, FM_ENA_FMT1);
	de.fme_flag = expected;
	de.fme_bus_specific = (void *)aflt->flt_addr;
	status = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &de);
	if ((aflt->flt_prot == AFLT_PROT_NONE) && (status == DDI_FM_FATAL))
		aflt->flt_panic = 1;
}

void
cpu_errorq_dispatch(char *error_class, void *payload, size_t payload_sz,
    errorq_t *eqp, uint_t flag)
{
	struct async_flt *aflt = (struct async_flt *)payload;

	aflt->flt_erpt_class = error_class;
	errorq_dispatch(eqp, payload, payload_sz, flag);
}

void
adjust_hw_copy_limits(int ecache_size)
{
	/*
	 * Set hw copy limits.
	 *
	 * /etc/system will be parsed later and can override one or more
	 * of these settings.
	 *
	 * At this time, ecache size seems only mildly relevant.
	 * We seem to run into issues with the d-cache and the stalls
	 * we see on misses.
	 *
	 * Cycle measurement indicates that 2 byte aligned copies fare
	 * little better than doing things with VIS at around 512 bytes.
	 * 4 byte aligned shows promise until around 1024 bytes. 8 byte
	 * aligned is faster whenever the source and destination data
	 * are in cache and the total size is less than 2 Kbytes.  The 2K
	 * limit seems to be driven by the 2K write cache.
	 * When more than 2K of copies are done in non-VIS mode, stores
	 * back up in the write cache.  In VIS mode, the write cache is
	 * bypassed, allowing faster cache-line writes aligned on cache
	 * boundaries.
	 *
	 * In addition, in non-VIS mode, there is no prefetching, so
	 * for larger copies, the advantage of prefetching to avoid even
	 * occasional cache misses is enough to justify using the VIS code.
	 *
	 * During testing, it was discovered that netbench ran 3% slower
	 * when hw_copy_limit_8 was 2K or larger.  Apparently for server
	 * applications, data is only used once (copied to the output
	 * buffer, then copied by the network device off the system).  Using
	 * the VIS copy saves more L2 cache state.  Network copies are
	 * around 1.3K to 1.5K in size for historical reasons.
	 *
	 * Therefore, a limit of 1K bytes will be used for the 8 byte
	 * aligned copy even for large caches and 8 MB ecache.  The
	 * infrastructure to allow different limits for different sized
	 * caches is kept to allow further tuning in later releases.
	 */

	if (min_ecache_size == 0 && use_hw_bcopy) {
		/*
		 * First time through - should be before /etc/system
		 * is read.
		 * Could skip the checks for zero but this lets us
		 * preserve any debugger rewrites.
		 */
		if (hw_copy_limit_1 == 0) {
			hw_copy_limit_1 = VIS_COPY_THRESHOLD;
			priv_hcl_1 = hw_copy_limit_1;
		}
		if (hw_copy_limit_2 == 0) {
			hw_copy_limit_2 = 2 * VIS_COPY_THRESHOLD;
			priv_hcl_2 = hw_copy_limit_2;
		}
		if (hw_copy_limit_4 == 0) {
			hw_copy_limit_4 = 4 * VIS_COPY_THRESHOLD;
			priv_hcl_4 = hw_copy_limit_4;
		}
		if (hw_copy_limit_8 == 0) {
			hw_copy_limit_8 = 4 * VIS_COPY_THRESHOLD;
			priv_hcl_8 = hw_copy_limit_8;
		}
		min_ecache_size = ecache_size;
	} else {
		/*
		 * MP initialization. Called *after* /etc/system has
		 * been parsed. One CPU has already been initialized.
		 * Need to cater for /etc/system having scragged one
		 * of our values.
		 */
		if (ecache_size == min_ecache_size) {
			/*
			 * Same size ecache. We do nothing unless we
			 * have a pessimistic ecache setting. In that
			 * case we become more optimistic (if the cache is
			 * large enough).
			 */
			if (hw_copy_limit_8 == 4 * VIS_COPY_THRESHOLD) {
				/*
				 * Need to adjust hw_copy_limit* from our
				 * pessimistic uniprocessor value to a more
				 * optimistic UP value *iff* it hasn't been
				 * reset.
				 */
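				/*
				 * All three size branches below currently
				 * assign the same value; the breakdown is
				 * retained for later per-size tuning (see
				 * the block comment above).
				 */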
				if ((ecache_size > 1048576) &&
				    (priv_hcl_8 == hw_copy_limit_8)) {
					if (ecache_size <= 2097152)
						hw_copy_limit_8 = 4 *
						    VIS_COPY_THRESHOLD;
					else if (ecache_size <= 4194304)
						hw_copy_limit_8 = 4 *
						    VIS_COPY_THRESHOLD;
					else
						hw_copy_limit_8 = 4 *
						    VIS_COPY_THRESHOLD;
					priv_hcl_8 = hw_copy_limit_8;
				}
			}
		} else if (ecache_size < min_ecache_size) {
			/*
			 * A different ecache size. Can this even happen?
			 */
			if (priv_hcl_8 == hw_copy_limit_8) {
				/*
				 * The previous value that we set
				 * is unchanged (i.e., it hasn't been
				 * scragged by /etc/system). Rewrite it.
				 */
				if (ecache_size <= 1048576)
					hw_copy_limit_8 = 8 *
					    VIS_COPY_THRESHOLD;
				else if (ecache_size <= 2097152)
					hw_copy_limit_8 = 8 *
					    VIS_COPY_THRESHOLD;
				else if (ecache_size <= 4194304)
					hw_copy_limit_8 = 8 *
					    VIS_COPY_THRESHOLD;
				else
					hw_copy_limit_8 = 10 *
					    VIS_COPY_THRESHOLD;
				priv_hcl_8 = hw_copy_limit_8;
				min_ecache_size = ecache_size;
			}
		}
	}
}

#define	VIS_BLOCKSIZE		64

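/*
 * Disable any watchpoints covering the block, redo the store via
 * dtrace_blksuword32(), then restore the watchpoints.
 */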
int
dtrace_blksuword32_err(uintptr_t addr, uint32_t *data)
{
	int ret, watched;

	watched = watch_disable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE);
	ret = dtrace_blksuword32(addr, data, 0);
	if (watched)
		watch_enable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE);

	return (ret);
}

void
opl_cpu_reg_init()
{
	uint64_t	this_cpu_log;

	/*
	 * We do not need to re-initialize cpu0 registers.
	 */
	if (cpu[getprocessorid()] == &cpu0)
		return;

	/*
	 * Initialize Error log Scratch register for error handling.
	 */

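	/*
	 * Each CPU owns an ERRLOG_BUFSZ slice of opl_err_log, indexed
	 * by processor id.
	 */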
	this_cpu_log = va_to_pa((void*)(((uint64_t)opl_err_log) +
		ERRLOG_BUFSZ * (getprocessorid())));
	opl_error_setup(this_cpu_log);

	/*
	 * Enable the MMU to translate multiple page sizes for
	 * the sITLB and sDTLB.
	 */
	opl_mpg_enable();
}

/*
 * Queue one event in ue_queue based on ecc_type_to_info entry.
 */
static void
cpu_queue_one_event(opl_async_flt_t *opl_flt, char *reason,
    ecc_type_to_info_t *eccp)
{
	struct async_flt *aflt = (struct async_flt *)opl_flt;

	if (reason &&
	    strlen(reason) + strlen(eccp->ec_reason) < MAX_REASON_STRING) {
		(void) strcat(reason, eccp->ec_reason);
	}

	opl_flt->flt_bit = eccp->ec_afsr_bit;
	opl_flt->flt_type = eccp->ec_flt_type;
	aflt->flt_in_memory = cpu_flt_in_memory(opl_flt, opl_flt->flt_bit);
	aflt->flt_payload = eccp->ec_err_payload;

	ASSERT(aflt->flt_status & (OPL_ECC_SYNC_TRAP|OPL_ECC_URGENT_TRAP));
	cpu_errorq_dispatch(eccp->ec_err_class,
		(void *)opl_flt, sizeof (opl_async_flt_t),
		ue_queue,
		aflt->flt_panic);
}

/*
 * Queue events on the async event queue, one event per error bit.
 * Return the number of events queued.
 */
int
cpu_queue_events(opl_async_flt_t *opl_flt, char *reason, uint64_t t_afsr_errs)
{
	struct async_flt *aflt = (struct async_flt *)opl_flt;
	ecc_type_to_info_t *eccp;
	int nevents = 0;

	/*
	 * Queue expected errors; the error bit and fault type must match
	 * in the ecc_type_to_info table.
	 */
	for (eccp = ecc_type_to_info; t_afsr_errs != 0 && eccp->ec_desc != NULL;
		eccp++) {
		if ((eccp->ec_afsr_bit & t_afsr_errs) != 0 &&
		    (eccp->ec_flags & aflt->flt_status) != 0) {
			/*
			 * A UE error event can be further classified into
			 * finer granularity based on the flt_eid_mod value
			 * set by HW.  We do special handling here so that
			 * we can report UE errors in finer granularity as
			 * ue_mem, ue_channel, ue_cpu or ue_path.
			 */
			if (eccp->ec_flt_type == OPL_CPU_SYNC_UE) {
				opl_flt->flt_eid_mod =
					(aflt->flt_stat & SFSR_EID_MOD)
					>> SFSR_EID_MOD_SHIFT;
				opl_flt->flt_eid_sid =
					(aflt->flt_stat & SFSR_EID_SID)
					>> SFSR_EID_SID_SHIFT;
				/*
				 * Need to advance the eccp pointer by
				 * flt_eid_mod so that we get an appropriate
				 * ecc pointer:
				 *
				 * EID			# of advances
				 * ----------------------------------
				 * OPL_ERRID_MEM	0
				 * OPL_ERRID_CHANNEL	1
				 * OPL_ERRID_CPU	2
				 * OPL_ERRID_PATH	3
				 */
				eccp += opl_flt->flt_eid_mod;
			}
			cpu_queue_one_event(opl_flt, reason, eccp);
			t_afsr_errs &= ~eccp->ec_afsr_bit;
			nevents++;
		}
	}

	return (nevents);
}

/*
 * Sync. error wrapper functions.
 * We use these functions to transfer, from the nucleus trap handler,
 * information about the trap type (data or instruction) and trap
 * level (0 or above 0), so we don't have to use SFSR's reserved bits.
 */

#define	OPL_SYNC_TL0	0
#define	OPL_SYNC_TL1	1
#define	OPL_ISYNC_ERR	0
#define	OPL_DSYNC_ERR	1

void
opl_cpu_isync_tl0_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
{
	uint64_t t_sfar = p_sfar;
	uint64_t t_sfsr = p_sfsr;

	opl_cpu_sync_error(rp, t_sfar, t_sfsr,
	    OPL_SYNC_TL0, OPL_ISYNC_ERR);
}

void
opl_cpu_isync_tl1_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
{
	uint64_t t_sfar = p_sfar;
	uint64_t t_sfsr = p_sfsr;

	opl_cpu_sync_error(rp, t_sfar, t_sfsr,
	    OPL_SYNC_TL1, OPL_ISYNC_ERR);
}

void
opl_cpu_dsync_tl0_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
{
	uint64_t t_sfar = p_sfar;
	uint64_t t_sfsr = p_sfsr;

	opl_cpu_sync_error(rp, t_sfar, t_sfsr,
	    OPL_SYNC_TL0, OPL_DSYNC_ERR);
}

void
opl_cpu_dsync_tl1_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
{
	uint64_t t_sfar = p_sfar;
	uint64_t t_sfsr = p_sfsr;

	opl_cpu_sync_error(rp, t_sfar, t_sfsr,
	    OPL_SYNC_TL1, OPL_DSYNC_ERR);
}

/*
 * The fj sync err handler transfers control here for UE, BERR, TO, TLB_MUL
 * and TLB_PRT.
 * This function is designed based on cpu_deferred_error().
 */

1846 static void
1847 opl_cpu_sync_error(struct regs *rp, ulong_t t_sfar, ulong_t t_sfsr,
1848     uint_t tl, uint_t derr)
1849 {
1850 	opl_async_flt_t opl_flt;
1851 	struct async_flt *aflt;
1852 	int trampolined = 0;
1853 	char pr_reason[MAX_REASON_STRING];
1854 	uint64_t log_sfsr;
1855 	int expected = DDI_FM_ERR_UNEXPECTED;
1856 	ddi_acc_hdl_t *hp;
1857 
1858 	/*
1859 	 * We need to look at p_flag to determine if the thread detected an
1860 	 * error while dumping core.  We can't grab p_lock here, but it's ok
1861 	 * because we just need a consistent snapshot and we know that everyone
1862 	 * else will store a consistent set of bits while holding p_lock.  We
1863 	 * don't have to worry about a race because SDOCORE is set once prior
1864 	 * to doing i/o from the process's address space and is never cleared.
1865 	 */
1866 	uint_t pflag = ttoproc(curthread)->p_flag;
1867 
1868 	pr_reason[0] = '\0';
1869 
1870 	/*
1871 	 * handle the specific error
1872 	 */
1873 	bzero(&opl_flt, sizeof (opl_async_flt_t));
1874 	aflt = (struct async_flt *)&opl_flt;
1875 	aflt->flt_id = gethrtime_waitfree();
1876 	aflt->flt_bus_id = getprocessorid();
1877 	aflt->flt_inst = CPU->cpu_id;
1878 	aflt->flt_stat = t_sfsr;
1879 	aflt->flt_addr = t_sfar;
1880 	aflt->flt_pc = (caddr_t)rp->r_pc;
1881 	aflt->flt_prot = (uchar_t)AFLT_PROT_NONE;
1882 	aflt->flt_class = (uchar_t)CPU_FAULT;
1883 	aflt->flt_priv = (uchar_t)
1884 		(tl == 1 ? 1 : ((rp->r_tstate & TSTATE_PRIV) ?  1 : 0));
1885 	aflt->flt_tl = (uchar_t)tl;
1886 	aflt->flt_panic = (uchar_t)(tl != 0 || aft_testfatal != 0 ||
1887 	    (t_sfsr & (SFSR_TLB_MUL|SFSR_TLB_PRT)) != 0);
1888 	aflt->flt_core = (pflag & SDOCORE) ? 1 : 0;
1889 	aflt->flt_status = (derr) ? OPL_ECC_DSYNC_TRAP : OPL_ECC_ISYNC_TRAP;
1890 
1891 	/*
1892 	 * If SFSR.FV is not set, both the SFSR and SFAR/SFPAR values are
1893 	 * uncertain, so clear all error bits to avoid mishandling and
1894 	 * force a system panic.
1895 	 * We skip all the procedures below and go straight to the panic.
1896 	 */
1897 	if (!(t_sfsr & SFSR_FV)) {
1898 		opl_flt.flt_type = OPL_CPU_INV_SFSR;
1899 		aflt->flt_panic = 1;
1900 		aflt->flt_payload = FM_EREPORT_PAYLOAD_SYNC;
1901 		cpu_errorq_dispatch(FM_EREPORT_CPU_INV_SFSR,
1902 			(void *)&opl_flt, sizeof (opl_async_flt_t), ue_queue,
1903 			aflt->flt_panic);
1904 		fm_panic("%sError(s)", "invalid SFSR ");
1905 	}
1906 
1907 	/*
1908 	 * If either the UE or the MK bit is off, this is not a valid UE
1909 	 * error; in that case clear the UE and MK_UE bits to prevent
1910 	 * mishandling below.
1911 	 * aflt->flt_stat keeps the original bits for reference.
1912 	 */
1913 	if ((t_sfsr & (SFSR_MK_UE|SFSR_UE)) !=
1914 	    (SFSR_MK_UE|SFSR_UE)) {
1915 		t_sfsr &= ~(SFSR_MK_UE|SFSR_UE);
1916 	}
1917 
1918 	/*
1919 	 * If the trap occurred in privileged mode at TL=0, we need to check to
1920 	 * see if we were executing in the kernel under on_trap() or t_lofault
1921 	 * protection.  If so, modify the saved registers so that we return
1922 	 * from the trap to the appropriate trampoline routine.
1923 	 */
1924 	if (!aflt->flt_panic && aflt->flt_priv && tl == 0) {
1925 		if (curthread->t_ontrap != NULL) {
1926 			on_trap_data_t *otp = curthread->t_ontrap;
1927 
1928 			if (otp->ot_prot & OT_DATA_EC) {
1929 				aflt->flt_prot = (uchar_t)AFLT_PROT_EC;
1930 				otp->ot_trap |= (ushort_t)OT_DATA_EC;
1931 				rp->r_pc = otp->ot_trampoline;
1932 				rp->r_npc = rp->r_pc + 4;
1933 				trampolined = 1;
1934 			}
1935 
1936 			if ((t_sfsr & (SFSR_TO | SFSR_BERR)) &&
1937 			    (otp->ot_prot & OT_DATA_ACCESS)) {
1938 				aflt->flt_prot = (uchar_t)AFLT_PROT_ACCESS;
1939 				otp->ot_trap |= (ushort_t)OT_DATA_ACCESS;
1940 				rp->r_pc = otp->ot_trampoline;
1941 				rp->r_npc = rp->r_pc + 4;
1942 				trampolined = 1;
1943 				/*
1944 				 * For peeks and caut_gets, errors are expected
1945 				 */
1946 				hp = (ddi_acc_hdl_t *)otp->ot_handle;
1947 				if (!hp)
1948 					expected = DDI_FM_ERR_PEEK;
1949 				else if (hp->ah_acc.devacc_attr_access ==
1950 				    DDI_CAUTIOUS_ACC)
1951 					expected = DDI_FM_ERR_EXPECTED;
1952 			}
1953 
1954 		} else if (curthread->t_lofault) {
1955 			aflt->flt_prot = AFLT_PROT_COPY;
1956 			rp->r_g1 = EFAULT;
1957 			rp->r_pc = curthread->t_lofault;
1958 			rp->r_npc = rp->r_pc + 4;
1959 			trampolined = 1;
1960 		}
1961 	}
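
	/*
	 * For reference, a protected kernel consumer of the trampoline
	 * mechanism above typically looks like the sketch below; the
	 * variable names are illustrative only.  on_trap() returns zero
	 * on the initial call and non-zero when control comes back via
	 * ot_trampoline after we redirect %pc above:
	 *
	 *	on_trap_data_t otd;
	 *
	 *	if (!on_trap(&otd, OT_DATA_ACCESS)) {
	 *		val = *(volatile uint64_t *)addr;	may fault
	 *	} else {
	 *		err = DDI_FAILURE;	reached via ot_trampoline
	 *	}
	 *	no_trap();
	 */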
1962 
1963 	/*
1964 	 * If we're in user mode or we're doing a protected copy, we either
1965 	 * want the ASTON code below to send a signal to the user process
1966 	 * or we want to panic if aft_panic is set.
1967 	 *
1968 	 * If we're in privileged mode and we're not doing a copy, then we
1969 	 * need to check if we've trampolined.  If we haven't trampolined,
1970 	 * we should panic.
1971 	 */
1972 	if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
1973 		if (t_sfsr & (SFSR_ERRS & ~(SFSR_BERR | SFSR_TO)))
1974 			aflt->flt_panic |= aft_panic;
1975 	} else if (!trampolined) {
1976 		aflt->flt_panic = 1;
1977 	}
1978 
1979 	/*
1980 	 * If we've trampolined due to a privileged TO or BERR, or if an
1981 	 * unprivileged TO or BERR occurred, we don't want to enqueue an
1982 	 * event for that TO or BERR.  Queue all other events (if any) besides
1983 	 * the TO/BERR.
1984 	 */
1985 	log_sfsr = t_sfsr;
1986 	if (trampolined) {
1987 		log_sfsr &= ~(SFSR_TO | SFSR_BERR);
1988 	} else if (!aflt->flt_priv) {
1989 		/*
1990 		 * In user mode, suppress messages unless
1991 		 * cpu_berr_to_verbose is set.
1992 		 */
1993 		if (!cpu_berr_to_verbose)
1994 			log_sfsr &= ~(SFSR_TO | SFSR_BERR);
1995 	}
1996 
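	/*
	 * If error bits remained to be logged but none matched an entry
	 * in ecc_type_to_info (cpu_queue_events() queued no events), or
	 * if no recognized error bits were set at all, report the SFSR
	 * value itself as invalid.
	 */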
1997 	if (((log_sfsr & SFSR_ERRS) &&
1998 		(cpu_queue_events(&opl_flt, pr_reason, t_sfsr) == 0)) ||
1999 	    ((t_sfsr & SFSR_ERRS) == 0)) {
2000 		opl_flt.flt_type = OPL_CPU_INV_SFSR;
2001 		aflt->flt_payload = FM_EREPORT_PAYLOAD_SYNC;
2002 		cpu_errorq_dispatch(FM_EREPORT_CPU_INV_SFSR,
2003 			(void *)&opl_flt, sizeof (opl_async_flt_t), ue_queue,
2004 			aflt->flt_panic);
2005 	}
2006 
2007 	if (t_sfsr & (SFSR_UE|SFSR_TO|SFSR_BERR)) {
2008 		cpu_run_bus_error_handlers(aflt, expected);
2009 	}
2010 
2011 	/*
2012 	 * Panic here if aflt->flt_panic has been set.  Enqueued errors will
2013 	 * be logged as part of the panic flow.
2014 	 */
2015 	if (aflt->flt_panic) {
2016 		if (pr_reason[0] == 0)
2017 			strcpy(pr_reason, "invalid SFSR ");
2018 
2019 		fm_panic("%sError(s)", pr_reason);
2020 	}
2021 
2022 	/*
2023 	 * If we queued an error and we are going to return from the trap and
2024 	 * the error was in user mode or inside of a copy routine, set AST flag
2025 	 * so the queue will be drained before returning to user mode.  The
2026 	 * AST processing will also act on our failure policy.
2027 	 */
2028 	if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
2029 		int pcb_flag = 0;
2030 
2031 		if (t_sfsr & (SFSR_ERRS &
2032 			~(SFSR_BERR | SFSR_TO)))
2033 			pcb_flag |= ASYNC_HWERR;
2034 
2035 		if (t_sfsr & SFSR_BERR)
2036 			pcb_flag |= ASYNC_BERR;
2037 
2038 		if (t_sfsr & SFSR_TO)
2039 			pcb_flag |= ASYNC_BTO;
2040 
2041 		ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag;
2042 		aston(curthread);
2043 	}
2044 }
2045 
2046 /*ARGSUSED*/
2047 void
2048 opl_cpu_urgent_error(struct regs *rp, ulong_t p_ugesr, ulong_t tl)
2049 {
2050 	opl_async_flt_t opl_flt;
2051 	struct async_flt *aflt;
2052 	char pr_reason[MAX_REASON_STRING];
2053 
2054 	/* normalize tl */
2055 	tl = (tl >= 2 ? 1 : 0);
2056 	pr_reason[0] = '\0';
2057 
2058 	bzero(&opl_flt, sizeof (opl_async_flt_t));
2059 	aflt = (struct async_flt *)&opl_flt;
2060 	aflt->flt_id = gethrtime_waitfree();
2061 	aflt->flt_bus_id = getprocessorid();
2062 	aflt->flt_inst = CPU->cpu_id;
2063 	aflt->flt_stat = p_ugesr;
2064 	aflt->flt_pc = (caddr_t)rp->r_pc;
2065 	aflt->flt_class = (uchar_t)CPU_FAULT;
2066 	aflt->flt_tl = tl;
2067 	aflt->flt_priv = (uchar_t)
2068 		(tl == 1 ? 1 : ((rp->r_tstate & TSTATE_PRIV) ?  1 : 0));
2069 	aflt->flt_status = OPL_ECC_URGENT_TRAP;
2070 	aflt->flt_panic = 1;
2071 	/*
2072 	 * HW does not set mod/sid for urgent errors,
2073 	 * so we have to set them here.
2074 	 */
2075 	opl_flt.flt_eid_mod = OPL_ERRID_CPU;
2076 	opl_flt.flt_eid_sid = aflt->flt_inst;
2077 
2078 	if (cpu_queue_events(&opl_flt, pr_reason, p_ugesr) == 0) {
2079 		opl_flt.flt_type = OPL_CPU_INV_UGESR;
2080 		aflt->flt_payload = FM_EREPORT_PAYLOAD_URGENT;
2081 		cpu_errorq_dispatch(FM_EREPORT_CPU_INV_URG,
2082 			(void *)&opl_flt, sizeof (opl_async_flt_t),
2083 			ue_queue, aflt->flt_panic);
2084 	}
2085 
2086 	fm_panic("Urgent Error");
2087 }
2088 
2089 /*
2090  * Initialize the periodic resetting of the error counters.
2091  */
2092 /* ARGSUSED */
2093 static void
2094 opl_ras_online(void *arg, cpu_t *cp, cyc_handler_t *hdlr, cyc_time_t *when)
2095 {
2096 	hdlr->cyh_func = (cyc_func_t)ras_cntr_reset;
2097 	hdlr->cyh_level = CY_LOW_LEVEL;
2098 	hdlr->cyh_arg = (void *)(uintptr_t)cp->cpu_id;
2099 
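	/*
	 * Stagger the start times by CPU id so the NCPU reset cyclics
	 * are spread across a 10-second window instead of firing
	 * together; each cyclic then repeats every
	 * opl_async_check_interval seconds.
	 */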
2100 	when->cyt_when = cp->cpu_id * (((hrtime_t)NANOSEC * 10)/ NCPU);
2101 	when->cyt_interval = (hrtime_t)NANOSEC * opl_async_check_interval;
2102 }
2103 
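/*
 * Register the omnipresent cyclic that periodically resets the error
 * counters; opl_ras_online() supplies the per-CPU handler and timing
 * as each CPU comes online.
 */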
2104 void
2105 cpu_mp_init(void)
2106 {
2107 	cyc_omni_handler_t hdlr;
2108 
2109 	hdlr.cyo_online = opl_ras_online;
2110 	hdlr.cyo_offline = NULL;
2111 	hdlr.cyo_arg = NULL;
2112 	mutex_enter(&cpu_lock);
2113 	(void) cyclic_add_omni(&hdlr);
2114 	mutex_exit(&cpu_lock);
2115 }
2116 
2117 /*ARGSUSED*/
2118 void
2119 mmu_init_kernel_pgsz(struct hat *hat)
2120 {
2121 }
2122 
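/*
 * Validate the segkmem_lpsize tunable.  For example, a hypothetical
 * /etc/system line selecting 4M kernel large pages would be:
 *
 *	set segkmem_lpsize = 0x400000
 *
 * A value that matches no supported TTE size falls back to 8K.
 */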
2123 size_t
2124 mmu_get_kernel_lpsize(size_t lpsize)
2125 {
2126 	uint_t tte;
2127 
2128 	if (lpsize == 0) {
2129 		/* no setting for segkmem_lpsize in /etc/system: use default */
2130 		return (MMU_PAGESIZE4M);
2131 	}
2132 
2133 	for (tte = TTE8K; tte <= TTE4M; tte++) {
2134 		if (lpsize == TTEBYTES(tte))
2135 			return (lpsize);
2136 	}
2137 
2138 	return (TTEBYTES(TTE8K));
2139 }
2140 
2141 /*
2142  * The following functions are unused in the
2143  * OPL cpu module.  They are defined here to resolve
2144  * dependencies in the "unix" module.
2145  * Unused functions that should never be called on
2146  * OPL are coded with ASSERT(0).
2147  */
2148 
2149 void
2150 cpu_disable_errors(void)
2151 {}
2152 
2153 void
2154 cpu_enable_errors(void)
2155 { ASSERT(0); }
2156 
2157 /*ARGSUSED*/
2158 void
2159 cpu_ce_scrub_mem_err(struct async_flt *ecc, boolean_t t)
2160 { ASSERT(0); }
2161 
2162 /*ARGSUSED*/
2163 void
2164 cpu_faulted_enter(struct cpu *cp)
2165 {}
2166 
2167 /*ARGSUSED*/
2168 void
2169 cpu_faulted_exit(struct cpu *cp)
2170 {}
2171 
2172 /*ARGSUSED*/
2173 void
2174 cpu_check_allcpus(struct async_flt *aflt)
2175 {}
2176 
2177 /*ARGSUSED*/
2178 void
2179 cpu_ce_log_err(struct async_flt *aflt, errorq_elem_t *t)
2180 { ASSERT(0); }
2181 
2182 /*ARGSUSED*/
2183 void
2184 cpu_check_ce(int flag, uint64_t pa, caddr_t va, uint_t psz)
2185 { ASSERT(0); }
2186 
2187 /*ARGSUSED*/
2188 void
2189 cpu_ce_count_unum(struct async_flt *ecc, int len, char *unum)
2190 { ASSERT(0); }
2191 
2192 /*ARGSUSED*/
2193 void
2194 cpu_busy_ecache_scrub(struct cpu *cp)
2195 {}
2196 
2197 /*ARGSUSED*/
2198 void
2199 cpu_idle_ecache_scrub(struct cpu *cp)
2200 {}
2201 
2202 /* ARGSUSED */
2203 void
2204 cpu_change_speed(uint64_t divisor, uint64_t arg2)
2205 { ASSERT(0); }
2206 
2207 void
2208 cpu_init_cache_scrub(void)
2209 {}
2210 
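/*
 * The three routines below use an address-of test such as
 * "if (&plat_get_mem_sid)" to discover at runtime whether the platform
 * module provides the routine; when the weak symbol is unresolved its
 * address is zero and we return ENOTSUP instead.
 */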
2211 /* ARGSUSED */
2212 int
2213 cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
2214 {
2215 	if (&plat_get_mem_sid) {
2216 		return (plat_get_mem_sid(unum, buf, buflen, lenp));
2217 	} else {
2218 		return (ENOTSUP);
2219 	}
2220 }
2221 
2222 /* ARGSUSED */
2223 int
2224 cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
2225 {
2226 	if (&plat_get_mem_addr) {
2227 		return (plat_get_mem_addr(unum, sid, offset, addrp));
2228 	} else {
2229 		return (ENOTSUP);
2230 	}
2231 }
2232 
2233 /* ARGSUSED */
2234 int
2235 cpu_get_mem_offset(uint64_t flt_addr, uint64_t *offp)
2236 {
2237 	if (&plat_get_mem_offset) {
2238 		return (plat_get_mem_offset(flt_addr, offp));
2239 	} else {
2240 		return (ENOTSUP);
2241 	}
2242 }
2243 
2244 /*ARGSUSED*/
2245 void
2246 itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
2247 { ASSERT(0); }
2248 
2249 /*ARGSUSED*/
2250 void
2251 dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
2252 { ASSERT(0); }
2253 
2254 /*ARGSUSED*/
2255 void
2256 read_ecc_data(struct async_flt *aflt, short verbose, short ce_err)
2257 { ASSERT(0); }
2258 
2259 /*ARGSUSED*/
2260 int
2261 ce_scrub_xdiag_recirc(struct async_flt *aflt, errorq_t *eqp,
2262     errorq_elem_t *eqep, size_t afltoffset)
2263 {
2264 	ASSERT(0);
2265 	return (0);
2266 }
2267 
2268 /*ARGSUSED*/
2269 char *
2270 flt_to_error_type(struct async_flt *aflt)
2271 {
2272 	ASSERT(0);
2273 	return (NULL);
2274 }
2275