/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Olympus-C module
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/trap.h>
#include <sys/opl_olympus_regs.h>
#include <sys/opl_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cmpregs.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

/*
 * Macro that flushes the entire Ecache.
 *
 * arg1 = ecache size
 * arg2 = ecache linesize
 * arg3 = ecache flush address - Not used for olympus-C
 */
#define	ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1)				\
	mov	ASI_L2_CTRL_U2_FLUSH, arg1;				\
	mov	ASI_L2_CTRL_RW_ADDR, arg2;				\
	stxa	arg1, [arg2]ASI_L2_CTRL
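
/*
 * For reference, a rough C sketch of what ECACHE_FLUSHALL does; the
 * stxa_asi() helper is hypothetical, standing in for a stxa to the
 * given ASI:
 *
 *	stxa_asi(ASI_L2_CTRL, ASI_L2_CTRL_RW_ADDR, ASI_L2_CTRL_U2_FLUSH);
 *
 * A single store to the L2 control register triggers the full flush;
 * the size and linesize arguments are only clobbered as scratch.
 */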

/*
 * SPARC64-VI MMU and Cache operations.
 */

#if defined(lint)

/* ARGSUSED */
void
vtag_flushpage(caddr_t vaddr, u_int ctxnum)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushpage)
	/*
	 * flush page from the tlb
	 *
	 * %o0 = vaddr
	 * %o1 = ctxnum
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
	bnz,a,pt %icc, 3f			/* disabled, panic	 */
	  nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(sfmmu_panic1), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic1), %o0
	ret
	restore
3:
#endif /* DEBUG */
	/*
	 * disable ints
	 */
	andn	%o5, PSTATE_IE, %o4
	wrpr	%o4, 0, %pstate

	/*
	 * Then, blow out the tlb
	 * Interrupts are disabled to prevent the primary ctx register
	 * from changing underneath us.
	 */
	brnz,pt	%o1, 1f			/* KCONTEXT? */
	sethi	%hi(FLUSH_ADDR), %o3
	/*
	 * For KCONTEXT demaps use primary. type = page implicitly
	 */
	stxa	%g0, [%o0]ASI_DTLB_DEMAP	/* dmmu flush for KCONTEXT */
	stxa	%g0, [%o0]ASI_ITLB_DEMAP	/* immu flush for KCONTEXT */
	flush	%o3
	b	5f
	nop
1:
	/*
	 * User demap.  We need to set the primary context properly.
	 * Secondary context cannot be used for SPARC64-VI IMMU.
	 * %o0 = vaddr
	 * %o1 = ctxnum
	 * %o3 = FLUSH_ADDR
	 */
	sethi	%hi(ctx_pgsz_array), %o4
	ldn	[%o4 + %lo(ctx_pgsz_array)], %o4
	ldub	[%o4 + %o1], %o4
	sll	%o4, CTXREG_EXT_SHIFT, %o4
	or	%o1, %o4, %o1
	wrpr	%g0, 1, %tl
	set	MMU_PCONTEXT, %o4
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
	ldxa	[%o4]ASI_DMMU, %o2		/* rd old ctxnum */
	stxa	%o1, [%o4]ASI_DMMU		/* wr new ctxnum */
4:
	stxa	%g0, [%o0]ASI_DTLB_DEMAP
	stxa	%g0, [%o0]ASI_ITLB_DEMAP
	stxa	%o2, [%o4]ASI_DMMU		/* restore old ctxnum */
	flush	%o3
	wrpr	%g0, 0, %tl
5:
	retl
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(vtag_flushpage)

#endif	/* lint */
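
/*
 * A rough C sketch of the user-context path above; ldxa_asi(),
 * stxa_asi() and demap_page() are hypothetical stand-ins for the
 * corresponding ldxa/stxa instructions, not real kernel interfaces:
 *
 *	uint64_t ctx, old;
 *
 *	ctx = ctxnum | ((uint64_t)ctx_pgsz_array[ctxnum] <<
 *	    CTXREG_EXT_SHIFT);
 *	old = ldxa_asi(ASI_DMMU, MMU_PCONTEXT);
 *	stxa_asi(ASI_DMMU, MMU_PCONTEXT, ctx);
 *	demap_page((uintptr_t)vaddr | DEMAP_PRIMARY | DEMAP_PAGE_TYPE);
 *	stxa_asi(ASI_DMMU, MMU_PCONTEXT, old);
 *
 * Interrupts stay disabled across the swap so nothing can change the
 * primary context register underneath us.
 */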


#if defined(lint)

/* ARGSUSED */
void
vtag_flushctx(u_int ctxnum)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushctx)
	/*
	 * flush context from the tlb
	 *
	 * %o0 = ctxnum
	 * We disable interrupts to prevent the primary ctx register changing
	 * underneath us.
	 */
	sethi	%hi(FLUSH_ADDR), %o3
	rdpr	%pstate, %o2

#ifdef DEBUG
	andcc	%o2, PSTATE_IE, %g0		/* if interrupts already */
	bnz,a,pt %icc, 1f			/* disabled, panic	 */
	  nop
	sethi	%hi(sfmmu_panic1), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic1), %o0
1:
#endif /* DEBUG */

	sethi	%hi(ctx_pgsz_array), %o4
	ldn	[%o4 + %lo(ctx_pgsz_array)], %o4
	ldub	[%o4 + %o0], %o4
	sll	%o4, CTXREG_EXT_SHIFT, %o4
	or	%o0, %o4, %o0
	wrpr	%o2, PSTATE_IE, %pstate		/* disable interrupts */
	set	MMU_PCONTEXT, %o4
	set	DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g1
	wrpr	%g0, 1, %tl
	ldxa	[%o4]ASI_DMMU, %o5		/* rd old ctxnum */
	stxa	%o0, [%o4]ASI_DMMU		/* wr new ctxnum */
4:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	stxa	%o5, [%o4]ASI_DMMU		/* restore old ctxnum */
	flush	%o3
	wrpr	%g0, 0, %tl
5:
	retl
	wrpr	%g0, %o2, %pstate		/* enable interrupts */
	SET_SIZE(vtag_flushctx)

#endif	/* lint */


#if defined(lint)

void
vtag_flushall(void)
{}

#else	/* lint */

	ENTRY_NP2(vtag_flushall, demap_all)
	/*
	 * flush the tlb
	 */
	sethi	%hi(FLUSH_ADDR), %o3
	set	DEMAP_ALL_TYPE, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%o3
	retl
	nop
	SET_SIZE(demap_all)
	SET_SIZE(vtag_flushall)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
vtag_flushpage_tl1(uint64_t vaddr, uint64_t ctxnum)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushpage_tl1)
	/*
	 * x-trap to flush page from tlb and tsb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = ctxnum
	 *
	 * assumes TSBE_TAG = 0
	 */
	srln	%g1, MMU_PAGESHIFT, %g1
	brnz,pt %g2, 1f					/* KCONTEXT */
	slln	%g1, MMU_PAGESHIFT, %g1			/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	retry
1:
	/* We need to demap in a user context */
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
	sethi	%hi(ctx_pgsz_array), %g4
	ldn	[%g4 + %lo(ctx_pgsz_array)], %g4
	ldub	[%g4 + %g2], %g4
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g2, %g4, %g2

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g5		/* rd old ctxnum */
	stxa	%g2, [%g4]ASI_DMMU		/* wr new ctxnum */
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	stxa	%g5, [%g4]ASI_DMMU		/* restore old ctxnum */
	retry
	SET_SIZE(vtag_flushpage_tl1)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t ctx_pgcnt)
{}

#else	/* lint */

	ENTRY_NP(vtag_flush_pgcnt_tl1)
	/*
	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = <zero32|ctx16|pgcnt16>
	 *
	 * NOTE: this handler relies on the fact that no
	 *	interrupts or traps can occur during the loop
	 *	issuing the TLB_DEMAP operations. It is assumed
	 *	that interrupts are disabled and this code is
	 *	fetching from the kernel locked text address.
	 *
	 * assumes TSBE_TAG = 0
	 */
	set	0xffff, %g4
	and	%g4, %g2, %g3			/* g3 = pgcnt */
	srln	%g2, 16, %g2			/* g2 = ctxnum */
	srln	%g1, MMU_PAGESHIFT, %g1
	brnz,pt	%g2, 1f				/* KCONTEXT? */
	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
4:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc,4b
	  add	%g1, %g2, %g1			/* next page */
	retry
1:
	/* We need to demap in a user context */
	sethi	%hi(ctx_pgsz_array), %g4
	ldn	[%g4 + %lo(ctx_pgsz_array)], %g4
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
	ldub	[%g4 + %g2], %g4
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g2, %g4, %g2

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g5		/* rd old ctxnum */
	stxa	%g2, [%g4]ASI_DMMU		/* wr new ctxnum */

	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
3:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc,3b
	  add	%g1, %g2, %g1			/* next page */

	stxa	%g5, [%g4]ASI_DMMU		/* restore old ctxnum */
	retry
	SET_SIZE(vtag_flush_pgcnt_tl1)

#endif	/* lint */
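
/*
 * A rough C sketch of the demap loop above; demap_page() is a
 * hypothetical stand-in for the paired ASI_DTLB_DEMAP/ASI_ITLB_DEMAP
 * stores:
 *
 *	do {
 *		demap_page(va);
 *		va += MMU_PAGESIZE;
 *	} while (--pgcnt != 0);
 *
 * For a user context the loop runs with MMU_PCONTEXT temporarily
 * switched to the target context, as in vtag_flushpage_tl1().
 */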


#if defined(lint)

/* ARGSUSED */
void
vtag_flushctx_tl1(uint64_t ctxnum, uint64_t dummy)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushctx_tl1)
	/*
	 * x-trap to flush context from tlb
	 *
	 * %g1 = ctxnum
	 */
	sethi	%hi(ctx_pgsz_array), %g4
	ldn	[%g4 + %lo(ctx_pgsz_array)], %g4
	ldub	[%g4 + %g1], %g4
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g1, %g4, %g1
	set	DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g4
	set	MMU_PCONTEXT, %g3
	ldxa	[%g3]ASI_DMMU, %g5		/* rd old ctxnum */
	stxa	%g1, [%g3]ASI_DMMU		/* wr new ctxnum */
	stxa	%g0, [%g4]ASI_DTLB_DEMAP
	stxa	%g0, [%g4]ASI_ITLB_DEMAP
	stxa	%g5, [%g3]ASI_DMMU		/* restore old ctxnum */
	retry
	SET_SIZE(vtag_flushctx_tl1)

#endif	/* lint */


#if defined(lint)

/*ARGSUSED*/
void
vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushall_tl1)
	/*
	 * x-trap to flush tlb
	 */
	set	DEMAP_ALL_TYPE, %g4
	stxa	%g0, [%g4]ASI_DTLB_DEMAP
	stxa	%g0, [%g4]ASI_ITLB_DEMAP
	retry
	SET_SIZE(vtag_flushall_tl1)

#endif	/* lint */


/*
 * VAC (virtual address conflict) does not apply to OPL.
 * VAC resolution is managed by the Olympus processor hardware.
 * As a result, all OPL VAC flushing routines are no-ops.
 */

#if defined(lint)

/* ARGSUSED */
void
vac_flushpage(pfn_t pfnum, int vcolor)
{}

#else	/* lint */

	ENTRY(vac_flushpage)
	retl
	  nop
	SET_SIZE(vac_flushpage)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
{}

#else	/* lint */

	ENTRY_NP(vac_flushpage_tl1)
	retry
	SET_SIZE(vac_flushpage_tl1)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
vac_flushcolor(int vcolor, pfn_t pfnum)
{}

#else	/* lint */

	ENTRY(vac_flushcolor)
	retl
	 nop
	SET_SIZE(vac_flushcolor)

#endif  /* lint */



#if defined(lint)

/* ARGSUSED */
void
vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)
{}

#else	/* lint */

	ENTRY(vac_flushcolor_tl1)
	retry
	SET_SIZE(vac_flushcolor_tl1)

#endif	/* lint */

#if defined(lint)

int
idsr_busy(void)
{
	return (0);
}

#else	/* lint */

/*
 * Determine whether or not the IDSR is busy.
 * Entry: no arguments
 * Returns: 1 if busy, 0 otherwise
 */
	ENTRY(idsr_busy)
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	clr	%o0
	btst	IDSR_BUSY, %g1
	bz,a,pt	%xcc, 1f
	mov	1, %o0
1:
	retl
	nop
	SET_SIZE(idsr_busy)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

/* ARGSUSED */
void
init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

#else	/* lint */

	.global _dispatch_status_busy
_dispatch_status_busy:
	.asciz	"ASI_INTR_DISPATCH_STATUS error: busy"
	.align	4

/*
 * Set up the interrupt dispatch data registers
 * Entry:
 *	%o0 - function or inumber to call
 *	%o1, %o2 - arguments (2 uint64_t's)
 */
	.seg "text"

	ENTRY(init_mondo)
#ifdef DEBUG
	!
	! IDSR should not be busy at the moment
	!
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	btst	IDSR_BUSY, %g1
	bz,pt	%xcc, 1f
	nop
	sethi	%hi(_dispatch_status_busy), %o0
	call	panic
	or	%o0, %lo(_dispatch_status_busy), %o0
#endif /* DEBUG */

	ALTENTRY(init_mondo_nocheck)
	!
	! interrupt vector dispatch data reg 0
	!
1:
	mov	IDDR_0, %g1
	mov	IDDR_1, %g2
	mov	IDDR_2, %g3
	stxa	%o0, [%g1]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 1
	!
	stxa	%o1, [%g2]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 2
	!
	stxa	%o2, [%g3]ASI_INTR_DISPATCH

	membar	#Sync
	retl
	nop
	SET_SIZE(init_mondo_nocheck)
	SET_SIZE(init_mondo)

#endif	/* lint */
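
/*
 * A rough C sketch of the dispatch setup above; stxa_asi() and
 * membar_sync() are hypothetical stand-ins for the stxa and
 * membar #Sync instructions:
 *
 *	stxa_asi(ASI_INTR_DISPATCH, IDDR_0, (uint64_t)func);
 *	stxa_asi(ASI_INTR_DISPATCH, IDDR_1, arg1);
 *	stxa_asi(ASI_INTR_DISPATCH, IDDR_2, arg2);
 *	membar_sync();
 *
 * The three dispatch data registers are only loaded here; shipit()
 * below performs the actual dispatch.
 */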


#if defined(lint)

/* ARGSUSED */
void
shipit(int upaid, int bn)
{ return; }

#else	/* lint */

/*
 * Ship mondo to aid using busy/nack pair bn
 */
	ENTRY_NP(shipit)
	sll	%o0, IDCR_PID_SHIFT, %g1	! IDCR<23:14> = agent id
	sll	%o1, IDCR_BN_SHIFT, %g2		! IDCR<28:24> = b/n pair
	or	%g1, IDCR_OFFSET, %g1		! IDCR<13:0> = 0x70
	or	%g1, %g2, %g1
	stxa	%g0, [%g1]ASI_INTR_DISPATCH	! interrupt vector dispatch
	membar	#Sync
	retl
	nop
	SET_SIZE(shipit)

#endif	/* lint */
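
/*
 * The dispatch register address is a simple bitfield composition; as
 * a rough C sketch:
 *
 *	uint64_t idcr = ((uint64_t)upaid << IDCR_PID_SHIFT) |
 *	    ((uint64_t)bn << IDCR_BN_SHIFT) | IDCR_OFFSET;
 *
 * Storing %g0 to that address via ASI_INTR_DISPATCH launches the mondo
 * that init_mondo() loaded into the dispatch data registers.
 */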


#if defined(lint)

/* ARGSUSED */
void
flush_instr_mem(caddr_t vaddr, size_t len)
{}

#else	/* lint */

/*
 * flush_instr_mem:
 *	Flush 1 page of the I-$ starting at vaddr
 * 	%o0 vaddr
 *	%o1 bytes to be flushed
 *
 * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
 * the stores from all processors, so a FLUSH instruction is only needed
 * to keep the pipeline consistent. A single flush at the end of a
 * sequence of stores that updates the instruction stream is therefore
 * sufficient to ensure correct operation.
 */

	ENTRY(flush_instr_mem)
	flush	%o0			! address irrelevant
	retl
	nop
	SET_SIZE(flush_instr_mem)

#endif	/* lint */
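
/*
 * A hedged sketch of the typical usage, where patch_site is a
 * hypothetical address of an instruction being rewritten:
 *
 *	*(uint32_t *)patch_site = new_instr;
 *	flush_instr_mem((caddr_t)patch_site, 4);
 *
 * The hardware already keeps the I-cache coherent with stores, so the
 * flush only needs to synchronize the pipeline, as noted above.
 */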


/*
 * flush_ecache:
 *	%o0 - 64 bit physical address
 *	%o1 - ecache size
 *	%o2 - ecache linesize
 */
#if defined(lint)

/*ARGSUSED*/
void
flush_ecache(uint64_t physaddr, size_t ecache_size, size_t ecache_linesize)
{}

#else /* !lint */

	ENTRY(flush_ecache)

	/*
	 * Flush the entire Ecache.
	 */
	ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)
	retl
	nop
	SET_SIZE(flush_ecache)

#endif /* lint */

#if defined(lint)

/*ARGSUSED*/
void
kdi_flush_idcache(int dcache_size, int dcache_lsize, int icache_size,
    int icache_lsize)
{
}

#else	/* lint */

	/*
	 * I/D cache flushing is not needed for OPL processors
	 */
	ENTRY(kdi_flush_idcache)
	retl
	nop
	SET_SIZE(kdi_flush_idcache)

#endif	/* lint */

#ifdef	TRAPTRACE
/*
 * Simplified trap trace macro for OPL. Adapted from us3.
 */
#define	OPL_TRAPTRACE(ptr, scr1, scr2, label)			\
	CPU_INDEX(scr1, ptr);					\
	sll	scr1, TRAPTR_SIZE_SHIFT, scr1;			\
	set	trap_trace_ctl, ptr;				\
	add	ptr, scr1, scr1;				\
	ld	[scr1 + TRAPTR_LIMIT], ptr;			\
	tst	ptr;						\
	be,pn	%icc, label/**/1;				\
	 ldx	[scr1 + TRAPTR_PBASE], ptr;			\
	ld	[scr1 + TRAPTR_OFFSET], scr1;			\
	add	ptr, scr1, ptr;					\
	rd	%asi, scr2;					\
	wr	%g0, TRAPTR_ASI, %asi;				\
	rd	STICK, scr1;					\
	stxa    scr1, [ptr + TRAP_ENT_TICK]%asi;		\
	rdpr	%tl, scr1;					\
	stha    scr1, [ptr + TRAP_ENT_TL]%asi;			\
	rdpr	%tt, scr1;					\
	stha	scr1, [ptr + TRAP_ENT_TT]%asi;			\
	rdpr	%tpc, scr1;					\
	stna    scr1, [ptr + TRAP_ENT_TPC]%asi;			\
	rdpr	%tstate, scr1;					\
	stxa	scr1, [ptr + TRAP_ENT_TSTATE]%asi;		\
	stna    %sp, [ptr + TRAP_ENT_SP]%asi;			\
	stna    %g0, [ptr + TRAP_ENT_TR]%asi;			\
	stna    %g0, [ptr + TRAP_ENT_F1]%asi;			\
	stna    %g0, [ptr + TRAP_ENT_F2]%asi;			\
	stna    %g0, [ptr + TRAP_ENT_F3]%asi;			\
	stna    %g0, [ptr + TRAP_ENT_F4]%asi;			\
	wr	%g0, scr2, %asi;				\
	CPU_INDEX(ptr, scr1);					\
	sll	ptr, TRAPTR_SIZE_SHIFT, ptr;			\
	set	trap_trace_ctl, scr1;				\
	add	scr1, ptr, ptr;					\
	ld	[ptr + TRAPTR_OFFSET], scr1;			\
	ld	[ptr + TRAPTR_LIMIT], scr2;			\
	st	scr1, [ptr + TRAPTR_LAST_OFFSET];		\
	add	scr1, TRAP_ENT_SIZE, scr1;			\
	sub	scr2, TRAP_ENT_SIZE, scr2;			\
	cmp	scr1, scr2;					\
	movge	%icc, 0, scr1;					\
	st	scr1, [ptr + TRAPTR_OFFSET];			\
label/**/1:
#endif	/* TRAPTRACE */
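
/*
 * The tail of the macro advances the per-cpu trace offset as a ring
 * buffer; a rough C sketch of that step (the names mirror the
 * TRAPTR_* offsets, not a real C structure):
 *
 *	last_offset = offset;
 *	offset += TRAP_ENT_SIZE;
 *	if (offset >= limit - TRAP_ENT_SIZE)
 *		offset = 0;
 */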



/*
 * Macros facilitating error handling.
 */

/*
 * Save alternative global registers reg1, reg2, reg3
 * to scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_SAVE_GLOBAL(reg1, reg2, reg3)	\
	stxa	reg1, [%g0]ASI_SCRATCHPAD		;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1	;\
	stxa	reg2, [reg1]ASI_SCRATCHPAD		;\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1	;\
	stxa	reg3, [reg1]ASI_SCRATCHPAD

/*
 * Restore alternative global registers reg1, reg2, reg3
 * from scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_RESTORE_GLOBAL(reg1, reg2, reg3)			\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1			;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg3				;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1			;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg2				;\
	ldxa	[%g0]ASI_SCRATCHPAD, reg1

/*
 * Logs value `val' into the member `offset' of a structure
 * at physical address `pa'
 */
#define	LOG_REG(pa, offset, val)				\
	add	pa, offset, pa					;\
	stxa	val, [pa]ASI_MEM

#define	FLUSH_ALL_TLB(tmp1)					\
	set	DEMAP_ALL_TYPE, tmp1				;\
	stxa	%g0, [tmp1]ASI_ITLB_DEMAP			;\
	stxa	%g0, [tmp1]ASI_DTLB_DEMAP			;\
	sethi	%hi(FLUSH_ADDR), tmp1				;\
	flush	tmp1

/*
 * Extracts the Physaddr to Logging Buffer field of the OPL_SCRATCHPAD_ERRLOG
 * scratch register by zeroing all other fields. Result is in pa.
 */
#define	LOG_ADDR(pa)							\
	mov	OPL_SCRATCHPAD_ERRLOG, pa				;\
	ldxa	[pa]ASI_SCRATCHPAD, pa					;\
	sllx	pa, 64-ERRLOG_REG_EIDR_SHIFT, pa			;\
	srlx	pa, 64-ERRLOG_REG_EIDR_SHIFT+ERRLOG_REG_ERR_SHIFT, pa	;\
	sllx	pa, ERRLOG_REG_ERR_SHIFT, pa
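
/*
 * The shift triple in LOG_ADDR isolates the bits between
 * ERRLOG_REG_EIDR_SHIFT and ERRLOG_REG_ERR_SHIFT while keeping them in
 * place; as a hedged C sketch:
 *
 *	pa = errlog << (64 - ERRLOG_REG_EIDR_SHIFT);
 *	pa = pa >> (64 - ERRLOG_REG_EIDR_SHIFT + ERRLOG_REG_ERR_SHIFT);
 *	pa = pa << ERRLOG_REG_ERR_SHIFT;
 *
 * The left shift drops the high EIDR field, the logical right shift
 * drops the low fields, and the final shift restores alignment.
 */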

/*
 * Advance the per-cpu error log buffer pointer to the next
 * ERRLOG_SZ entry, making sure that it wraps around at the
 * ERRLOG_BUFSZ boundary. The args logpa, bufmask, tmp are
 * scratch registers whose input values are unused.
 *
 * Algorithm:
 * 1. logpa = contents of errorlog scratchpad register
 * 2. bufmask = ERRLOG_BUFSZ - 1
 * 3. tmp = logpa & ~(bufmask)     (tmp is now logbase)
 * 4. logpa += ERRLOG_SZ
 * 5. logpa = logpa & bufmask      (get new offset to logbase)
 * 6. logpa = tmp | logpa
 * 7. write logpa back into errorlog scratchpad register
 *
 * new logpa = (logpa & ~bufmask) | ((logpa + ERRLOG_SZ) & bufmask)
 *
 */
#define	UPDATE_LOGADD(logpa, bufmask, tmp)			\
	set	OPL_SCRATCHPAD_ERRLOG, tmp			;\
	ldxa	[tmp]ASI_SCRATCHPAD, logpa				;\
	set	(ERRLOG_BUFSZ-1), bufmask			;\
	andn	logpa, bufmask, tmp				;\
	add	logpa, ERRLOG_SZ, logpa				;\
	and	logpa, bufmask, logpa				;\
	or	tmp, logpa, logpa				;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp			;\
	stxa	logpa, [tmp]ASI_SCRATCHPAD

/* Log error status registers into the log buffer */
#define	LOG_SYNC_REG(sfsr, sfar, tmp)				\
	LOG_ADDR(tmp)						;\
	LOG_REG(tmp, LOG_SFSR_OFF, sfsr)			;\
	LOG_ADDR(tmp)						;\
	mov	tmp, sfsr					;\
	LOG_REG(tmp, LOG_SFAR_OFF, sfar)			;\
	rd	STICK, sfar					;\
	mov	sfsr, tmp					;\
	LOG_REG(tmp, LOG_STICK_OFF, sfar)			;\
	rdpr	%tl, tmp					;\
	sllx	tmp, 32, sfar					;\
	rdpr	%tt, tmp					;\
	or	sfar, tmp, sfar					;\
	mov	sfsr, tmp					;\
	LOG_REG(tmp, LOG_TL_OFF, sfar)				;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp			;\
	ldxa	[tmp]ASI_SCRATCHPAD, sfar				;\
	mov	sfsr, tmp					;\
	LOG_REG(tmp, LOG_ASI3_OFF, sfar)			;\
	rdpr	%tpc, sfar					;\
	mov	sfsr, tmp					;\
	LOG_REG(tmp, LOG_TPC_OFF, sfar)				;\
	UPDATE_LOGADD(sfsr, sfar, tmp)

#define	LOG_UGER_REG(uger, tmp, tmp2)				\
	LOG_ADDR(tmp)						;\
	mov	tmp, tmp2					;\
	LOG_REG(tmp2, LOG_UGER_OFF, uger)			;\
	mov	tmp, uger					;\
	rd	STICK, tmp2					;\
	LOG_REG(tmp, LOG_STICK_OFF, tmp2)			;\
	rdpr	%tl, tmp					;\
	sllx	tmp, 32, tmp2					;\
	rdpr	%tt, tmp					;\
	or	tmp2, tmp, tmp2					;\
	mov	uger, tmp					;\
	LOG_REG(tmp, LOG_TL_OFF, tmp2)				;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp2			;\
	ldxa	[tmp2]ASI_SCRATCHPAD, tmp2				;\
	mov	uger, tmp					;\
	LOG_REG(tmp, LOG_ASI3_OFF, tmp2)			;\
	rdpr	%tstate, tmp2					;\
	mov	uger, tmp					;\
	LOG_REG(tmp, LOG_TSTATE_OFF, tmp2)			;\
	rdpr	%tpc, tmp2					;\
	mov	uger, tmp					;\
	LOG_REG(tmp, LOG_TPC_OFF, tmp2)				;\
	UPDATE_LOGADD(uger, tmp, tmp2)

/*
 * Scrub the STICK_COMPARE register to clear the error by updating
 * it to a reasonable value for interrupt generation.
 * Ensure that we observe the CPU_ENABLE flag so that we
 * don't accidentally enable the TICK interrupt in STICK_COMPARE,
 * i.e. no clock interrupt will be generated if the CPU_ENABLE flag
 * is off.
 */
#define	UPDATE_STICK_COMPARE(tmp1, tmp2)			\
	CPU_ADDR(tmp1, tmp2)					;\
	lduh	[tmp1 + CPU_FLAGS], tmp2			;\
	andcc	tmp2, CPU_ENABLE, %g0 				;\
	set	OPL_UGER_STICK_DIFF, tmp2			;\
	rd	STICK, tmp1					;\
	add	tmp1, tmp2, tmp1				;\
	mov	1, tmp2						;\
	sllx	tmp2, TICKINT_DIS_SHFT, tmp2			;\
	or	tmp1, tmp2, tmp2				;\
	movnz	%xcc, tmp1, tmp2				;\
	wr	tmp2, %g0, STICK_COMPARE
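
/*
 * A rough C sketch of the decision above; read_stick() and
 * write_stick_compare() are illustrative stand-ins for the rd/wr
 * instructions:
 *
 *	uint64_t cmpr = read_stick() + OPL_UGER_STICK_DIFF;
 *	if (!(cpu->cpu_flags & CPU_ENABLE))
 *		cmpr |= 1ULL << TICKINT_DIS_SHFT;
 *	write_stick_compare(cmpr);
 *
 * An offline cpu gets the interrupt-disable bit set so the scrub write
 * cannot generate a spurious clock interrupt.
 */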

/*
 * Reset registers that may be corrupted by an IAUG_CRE error.
 * To update the interrupt handling related registers, force a
 * clock interrupt.
 */
#define	IAG_CRE(tmp1, tmp2)					\
	set	OPL_SCRATCHPAD_ERRLOG, tmp1			;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tmp1				;\
	srlx	tmp1, ERRLOG_REG_EIDR_SHIFT, tmp1		;\
	set	ERRLOG_REG_EIDR_MASK, tmp2			;\
	and	tmp1, tmp2, tmp1				;\
	stxa	tmp1, [%g0]ASI_EIDR				;\
	wr	%g0, 0, SOFTINT					;\
	sethi	%hi(hres_last_tick), tmp1			;\
	ldx	[tmp1 + %lo(hres_last_tick)], tmp1		;\
	set	OPL_UGER_STICK_DIFF, tmp2			;\
	add	tmp1, tmp2, tmp1				;\
	wr	tmp1, %g0, STICK				;\
	UPDATE_STICK_COMPARE(tmp1, tmp2)


#define	CLEAR_FPREGS(tmp)					\
	wr	%g0, FPRS_FEF, %fprs				;\
	wr	%g0, %g0, %gsr					;\
	sethi	%hi(opl_clr_freg), tmp				;\
	or	tmp, %lo(opl_clr_freg), tmp			;\
	ldx	[tmp], %fsr					;\
	fzero	 %d0						;\
	fzero	 %d2						;\
	fzero	 %d4						;\
	fzero	 %d6						;\
	fzero	 %d8						;\
	fzero	 %d10						;\
	fzero	 %d12						;\
	fzero	 %d14						;\
	fzero	 %d16						;\
	fzero	 %d18						;\
	fzero	 %d20						;\
	fzero	 %d22						;\
	fzero	 %d24						;\
	fzero	 %d26						;\
	fzero	 %d28						;\
	fzero	 %d30						;\
	fzero	 %d32						;\
	fzero	 %d34						;\
	fzero	 %d36						;\
	fzero	 %d38						;\
	fzero	 %d40						;\
	fzero	 %d42						;\
	fzero	 %d44						;\
	fzero	 %d46						;\
	fzero	 %d48						;\
	fzero	 %d50						;\
	fzero	 %d52						;\
	fzero	 %d54						;\
	fzero	 %d56						;\
	fzero	 %d58						;\
	fzero	 %d60						;\
	fzero	 %d62						;\
	wr	%g0, %g0, %fprs

#define	CLEAR_GLOBALS()						\
	mov	%g0, %g1					;\
	mov	%g0, %g2					;\
	mov	%g0, %g3					;\
	mov	%g0, %g4					;\
	mov	%g0, %g5					;\
	mov	%g0, %g6					;\
	mov	%g0, %g7

/*
 * We do not clear the alternative globals here because they
 * are scratch registers, i.e. there is no code that reads them
 * without writing to them first. In other words, every read is
 * always preceded by a write, which makes clearing the
 * alternative globals unnecessary.
 */
#define	CLEAR_GEN_REGS(tmp1, label)				\
	set	TSTATE_KERN, tmp1				;\
	wrpr	%g0, tmp1, %tstate				;\
	mov	%g0, %y						;\
	mov	%g0, %asi					;\
	mov	%g0, %ccr					;\
	mov	%g0, %l0					;\
	mov	%g0, %l1					;\
	mov	%g0, %l2					;\
	mov	%g0, %l3					;\
	mov	%g0, %l4					;\
	mov	%g0, %l5					;\
	mov	%g0, %l6					;\
	mov	%g0, %l7					;\
	mov	%g0, %i0					;\
	mov	%g0, %i1					;\
	mov	%g0, %i2					;\
	mov	%g0, %i3					;\
	mov	%g0, %i4					;\
	mov	%g0, %i5					;\
	mov	%g0, %i6					;\
	mov	%g0, %i7					;\
	mov	%g0, %o1					;\
	mov	%g0, %o2					;\
	mov	%g0, %o3					;\
	mov	%g0, %o4					;\
	mov	%g0, %o5					;\
	mov	%g0, %o6					;\
	mov	%g0, %o7					;\
	mov	%g0, %o0					;\
	mov	%g0, %g4					;\
	mov	%g0, %g5					;\
	mov	%g0, %g6					;\
	mov	%g0, %g7					;\
	rdpr	%tl, tmp1					;\
	cmp	tmp1, 1						;\
	be,pt	%xcc, label/**/1				;\
	 rdpr	%pstate, tmp1					;\
	wrpr	tmp1, PSTATE_AG|PSTATE_IG, %pstate		;\
	CLEAR_GLOBALS()						;\
	rdpr	%pstate, tmp1					;\
	wrpr	tmp1, PSTATE_IG|PSTATE_MG, %pstate		;\
	CLEAR_GLOBALS()						;\
	rdpr	%pstate, tmp1					;\
	wrpr	tmp1, PSTATE_MG|PSTATE_AG, %pstate		;\
	ba,pt	%xcc, label/**/2				;\
	 nop							;\
label/**/1:							;\
	wrpr	tmp1, PSTATE_AG, %pstate			;\
	CLEAR_GLOBALS()						;\
	rdpr	%pstate, tmp1					;\
	wrpr	tmp1, PSTATE_AG, %pstate			;\
label/**/2:


/*
 * Reset all window related registers
 */
#define	RESET_WINREG(tmp)					\
	sethi	%hi(nwin_minus_one), tmp			;\
	ld	[tmp + %lo(nwin_minus_one)], tmp		;\
	wrpr	%g0, tmp, %cwp					;\
	wrpr	%g0, tmp, %cleanwin				;\
	sub	tmp, 1, tmp					;\
	wrpr	%g0, tmp, %cansave				;\
	wrpr	%g0, %g0, %canrestore				;\
	wrpr	%g0, %g0, %otherwin				;\
	wrpr	%g0, PIL_MAX, %pil				;\
	wrpr	%g0, WSTATE_KERN, %wstate


#define	RESET_PREV_TSTATE(tmp1, tmp2, label)			\
	rdpr	%tl, tmp1					;\
	subcc	tmp1, 1, tmp1					;\
	bz,pt	%xcc, label/**/1				;\
	 nop							;\
	wrpr	tmp1, %g0, %tl					;\
	set	TSTATE_KERN, tmp2				;\
	wrpr	tmp2, %g0, %tstate				;\
	wrpr	%g0, %g0, %tpc					;\
	wrpr	%g0, %g0, %tnpc					;\
	add	tmp1, 1, tmp1					;\
	wrpr	tmp1, %g0, %tl					;\
label/**/1:


/*
 * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
 * and we reset these registers here.
 */
#define	RESET_CUR_TSTATE(tmp)					\
	set	TSTATE_KERN, tmp				;\
	wrpr	%g0, tmp, %tstate				;\
	wrpr	%g0, 0, %tpc					;\
	wrpr	%g0, 0, %tnpc					;\
	RESET_WINREG(tmp)

/*
 * In case of urgent errors some MMU registers may be
 * corrupted, so we set them to reasonable values here.
 */

#if !defined(lint)
#define	RESET_MMU_REGS(tmp1, tmp2, tmp3)			\
	set	MMU_PCONTEXT, tmp1				;\
	stxa	%g0, [tmp1]ASI_DMMU				;\
	set	MMU_SCONTEXT, tmp1				;\
	stxa	%g0, [tmp1]ASI_DMMU				;\
	sethi	%hi(ktsb_base), tmp1				;\
	ldx	[tmp1 + %lo(ktsb_base)], tmp2			;\
	mov	MMU_TSB, tmp3					;\
	stxa	tmp2, [tmp3]ASI_IMMU				;\
	stxa	tmp2, [tmp3]ASI_DMMU				;\
	membar	#Sync

#define	RESET_TSB_TAGPTR(tmp)					\
	set	MMU_TAG_ACCESS, tmp				;\
	stxa	%g0, [tmp]ASI_IMMU				;\
	stxa	%g0, [tmp]ASI_DMMU				;\
	membar	#Sync
#endif /* lint */

/*
 * RESET_TO_PRIV()
 *
 * In many cases, we need to force the thread into privileged mode because
 * privileged mode is the only mode in which the system can continue to
 * work, given that the user mode information may be indeterminable due to
 * register corruption.
 *
 *  - opl_uger_ctxt
 *    If the error is a secondary TSB related register parity error, we
 *    have no idea what value the register is supposed to hold.
 *
 *  In the three cases below, %tstate is not accessible until it is
 *  overwritten with some value, so we have no clue if the thread was
 *  running in user mode or not:
 *   - opl_uger_pstate
 *     If the error is %pstate parity, it propagates to %tstate.
 *   - opl_uger_tstate
 *     No need to say the reason.
 *   - opl_uger_r
 *     If the error is %ccr or %asi parity, it propagates to %tstate.
 *
 * For the above four cases, user mode info may not be available for
 * sys_trap() and user_trap() to work consistently. So we have to force
 * the thread into privileged mode.
 *
 * Forcing the thread to privileged mode requires forcing
 * regular %g7 to be CPU_THREAD. Because if it was running in user mode,
 * %g7 will be set in user_trap(). Also since the %sp may be in
 * an inconsistent state, we need to do a stack reset and switch to
 * something we know, i.e. the current thread's kernel stack.
 * We also reset the window registers and MMU registers just to
 * make sure.
 *
 * To set regular %g7, we need to clear the PSTATE_AG bit and need to
 * use one local register. Note that we are panicking and will never
 * unwind back, so it is ok to clobber a local.
 *
 * If the thread was running in user mode, the %tpc value itself might be
 * within the range of OBP addresses. %tpc must be forced to be zero to
 * prevent sys_trap() from going to prom_trap().
 *
 */
#define	RESET_TO_PRIV(tmp, tmp1, tmp2, local)			\
	RESET_WINREG(tmp)					;\
	RESET_MMU_REGS(tmp, tmp1, tmp2)				;\
	CPU_ADDR(tmp, tmp1)					;\
	ldx	[tmp + CPU_THREAD], local			;\
	ldx	[local + T_STACK], tmp				;\
	sub	tmp, STACK_BIAS, %sp				;\
	rdpr	%pstate, tmp					;\
	wrpr	tmp, PSTATE_AG, %pstate				;\
	mov	local, %g7					;\
	rdpr	%pstate, local					;\
	wrpr	local, PSTATE_AG, %pstate			;\
	wrpr	%g0, 1, %tl					;\
	set	TSTATE_KERN, tmp				;\
	rdpr	%cwp, tmp1					;\
	or	tmp, tmp1, tmp					;\
	wrpr	tmp, %g0, %tstate				;\
	wrpr	%g0, %tpc


#if defined(lint)

void
ce_err(void)
{}

#else	/* lint */

/*
 * We normally don't expect CE traps since we disable the
 * 0x63 trap reporting at the start of day. There is a
 * small window before we disable them, so let's check for
 * that case. Otherwise, panic.
 */

	.align	128
	ENTRY_NP(ce_err)
	mov	AFSR_ECR, %g1
	ldxa	[%g1]ASI_ECR, %g1
	andcc	%g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0
	bz,pn	%xcc, 1f
	 nop
	retry
1:
	/*
	 * We did disable the 0x63 trap reporting.
	 * This shouldn't happen - panic.
	 */
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err)

#endif	/* lint */


#if defined(lint)

void
ce_err_tl1(void)
{}

#else	/* lint */

/*
 * We don't use trap for CE detection.
 */
	ENTRY_NP(ce_err_tl1)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err_tl1)

#endif	/* lint */


#if defined(lint)

void
async_err(void)
{}

#else	/* lint */

/*
 * async_err is the default handler for IAE/DAE traps.
 * For OPL, we patch in the right handler at start of day.
 * But if an IAE/DAE trap gets generated before the handler
 * is patched, panic.
 */
	ENTRY_NP(async_err)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(async_err)

#endif	/* lint */

#if defined(lint)
void
opl_sync_trap(void)
{}
#else	/* lint */

	.seg	".data"
	.global	opl_clr_freg
	.global opl_cpu0_err_log

	.align	16
opl_clr_freg:
	.word	0
	.align	16

	.align	MMU_PAGESIZE
opl_cpu0_err_log:
	.skip	MMU_PAGESIZE

/*
 * Common synchronous error trap handler (tt=0xA, 0x32)
 * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 0. Do TRAPTRACE if enabled.
 * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
 * 2. The SFSR register is read and verified as valid by checking
 *    that the SFSR.FV bit is set. If SFSR.FV is not set, the
 *    error cases cannot be decoded/determined and the SFPAR
 *    register that contains the physical faultaddr is also
 *    not valid. Also the SFPAR is only valid for UE/TO/BERR error
 *    cases. Assuming the SFSR.FV is valid:
 *    - BERR(bus error)/TO(timeout)/UE case
 *      If any of these error cases are detected, read the SFPAR
 *      to get the faultaddress. Generate ereport.
 *    - TLB Parity case (only recoverable case)
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *	use %tpc for faultaddress (SFAR is not valid in IAE)
 *	Flush all the tlbs.
 *	Subtract one from the recoverable error count stored in
 *	the error log scratch register. If the threshold limit
 *	is reached (zero) - generate ereport. Else
 *	restore globals and retry (no ereport is generated).
 *    - TLB Multiple hits
 *	For DAE, read SFAR for the faultaddress. For IAE,
 *	use %tpc for faultaddress (SFAR is not valid in IAE).
 *	Flush all tlbs and generate ereport.
 * 3. TL=0 and TL>0 considerations
 *    - Since both TL=0 & TL>0 traps are made to vector into
 *      the same handler, the underlying assumption/design here is
 *      that any nested error condition (if it happens) occurs only
 *	in the handler and the system is assumed to eventually go
 *      into Red mode. With this philosophy in mind, the recoverable
 *      TLB Parity error case never checks the TL level before it
 *      retries. Note that this is ok for the TL>1 case (assuming we
 *	don't have a nested error) since we always save the globals
 *      %g1, %g2 & %g3 whenever we enter this trap handler.
 *    - Additional TL=0 vs TL>1 handling includes:
 *      - For a UE error occurring under TL>1, special handling
 *        is added to prevent the unlikely chance of a cpu-lockup
 *        when a UE was originally detected in the user stack and
 *        the spill trap handler taken from sys_trap() happened
 *        to reference the same UE location. Under the above
 *        condition (TL>1 and UE error), paranoid code is added
 *        to reset window regs so that spill traps can't happen
 *        during the unwind back to TL=0 handling.
 *        Note that we can do that because we are not returning
 *	  back.
 * 4. Ereport generation.
 *    - Ereport generation is performed when we unwind to the TL=0
 *      handling code via sys_trap(). on_trap()/lofault protection
 *      will apply there.
 *
 */
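/*
 * A rough C sketch of the fault address selection for the DAE (0x32)
 * path described above; ldxa_asi() is an illustrative stand-in for the
 * ldxa instruction:
 *
 *	uint64_t sfsr = ldxa_asi(ASI_DMMU, MMU_SFSR);
 *	uint64_t fault;
 *
 *	if (!(sfsr & SFSR_FV))
 *		fault = ldxa_asi(ASI_DMMU, MMU_SFAR);
 *	else if (sfsr & (SFSR_UE | SFSR_BERR | SFSR_TO))
 *		fault = ldxa_asi(ASI_DMMU, OPL_MMU_SFPAR);
 *	else
 *		fault = ldxa_asi(ASI_DMMU, MMU_SFAR);
 *
 * The IAE (0xA) path is analogous, except that %tpc is used where SFAR
 * would be read, since SFAR is not valid for IAE.
 */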
	ENTRY_NP(opl_sync_trap)
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_sync_trap_lb)
	rdpr	%tt, %g1
#endif	/* TRAPTRACE */
	cmp	%g1, T_INSTR_ERROR
	bne,pt	%xcc, 0f
	 mov	MMU_SFSR, %g3
	ldxa	[%g3]ASI_IMMU, %g1	! IAE trap case tt = 0xa
	andcc	%g1, SFSR_FV, %g0
	bz,a,pn %xcc, 2f		! Branch if SFSR is invalid and
	 rdpr	%tpc, %g2		! use %tpc for faultaddr instead

	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check for UE/BERR/TO errors
	bz,a,pt %xcc, 1f		! Branch if not UE/BERR/TO and
	 rdpr	%tpc, %g2		! use %tpc as faultaddr
	set	OPL_MMU_SFPAR, %g3	! In the UE/BERR/TO cases, use
	ba,pt	%xcc, 2f		! SFPAR as faultaddr
	 ldxa	[%g3]ASI_IMMU, %g2
0:
	ldxa	[%g3]ASI_DMMU, %g1	! DAE trap case tt = 0x32
	andcc	%g1, SFSR_FV, %g0
	bnz,pt  %xcc, 7f		! branch if SFSR.FV is valid
	 mov	MMU_SFAR, %g2		! set %g2 to use SFAR
	ba,pt	%xcc, 2f		! SFSR.FV is not valid, read SFAR
	 ldxa	[%g2]ASI_DMMU, %g2	! for faultaddr
7:
	sethi  %hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check UE/BERR/TO for valid SFPAR
	movnz	%xcc, OPL_MMU_SFPAR, %g2 ! Use SFPAR instead of SFAR for
	ldxa	[%g2]ASI_DMMU, %g2	! faultaddr
1:
	sethi	%hi(SFSR_TLB_PRT), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 8f		! branch for TLB multi-hit check
	 nop
	/*
	 * This is the TLB parity error case and it is the
	 * only retryable error case.
	 * Only %g1, %g2 and %g3 are allowed
	 */
	FLUSH_ALL_TLB(%g3)
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g3		! Read errlog scratchreg
	and	%g3, ERRLOG_REG_NUMERR_MASK, %g3! Extract the error count
	subcc	%g3, 1, %g0			! Subtract one from the count
	bz,pn	%xcc, 2f		! too many TLB parity errs in a certain
	 nop				! period, branch to generate ereport
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2
	sub	%g2, 1, %g2		! decrement error counter by 1
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! update the errlog scratchreg
	OPL_RESTORE_GLOBAL(%g1, %g2, %g3)
	retry
8:
	sethi	%hi(SFSR_TLB_MUL), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 2f		! check for the TLB multi-hit errors
	 nop
	FLUSH_ALL_TLB(%g3)
2:
	/*
	 * non-retryable error handling
	 * now we can use other registers since
	 * we will not be returning back
	 */
	mov	%g1, %g5		! %g5 = SFSR
	mov	%g2, %g6		! %g6 = SFPAR or SFAR/tpc
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log

	/*
	 * Special case for UE on the user stack.
	 * There is a possibility that the same error may come back here
	 * by touching the same UE in the spill trap handler taken from
	 * sys_trap(). It ends up with an infinite loop causing a cpu lockup.
	 * The conditions for handling this case are:
	 * - SFSR_FV is valid and SFSR_UE is set
	 * - we are at TL > 1
	 * If the above conditions are true, we force %cansave to be a
	 * big number to prevent spill traps in sys_trap(). Note that
	 * we will not be returning back.
	 */
	rdpr	%tt, %g4		! %g4 == ttype
	rdpr	%tl, %g1		! %g1 == tl
	cmp	%g1, 1			! Check if TL == 1
	be,pt	%xcc, 3f		! branch if we came from TL=0
	 nop
	andcc	%g5, SFSR_FV, %g0	! see if SFSR.FV is valid
	bz,pn	%xcc, 4f		! branch, checking UE is meaningless
	sethi	%hi(SFSR_UE), %g2
	andcc	%g5, %g2, %g0		! check for UE
	bz,pt	%xcc, 4f		! branch if not UE
	 nop
	RESET_WINREG(%g1)		! reset windows to prevent spills
4:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl1_error, %g1
	set	opl_cpu_dsync_tl1_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
	ba,pt	%icc, 6f
	nop
3:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl0_error, %g1
	set	opl_cpu_dsync_tl0_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
6:
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	 mov	PIL_15, %g4
	SET_SIZE(opl_sync_trap)
#endif	/* lint */

#if defined(lint)
void
opl_uger_trap(void)
{}
#else	/* lint */
/*
 * Common Urgent error trap handler (tt=0x40)
 * All TL=0 and TL>0 0x40 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 1. Read the Urgent error status register (UGERSR)
 *    Faultaddress is N/A here and it is not collected.
 * 2. Check to see if we have a multiple errors case
 *    If so, we enable the WEAK_ED (weak error detection) bit
 *    to prevent any potential error storms and branch directly
 *    to generate ereport. (we don't decode/handle individual
 *    error cases when we get a multiple error situation)
 * 3. Now look for the recoverable error cases which include
 *    IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
 *    recoverable errors are detected, do the following:
 *    - Flush all tlbs.
 *    - Verify that we came from TL=0, if not, generate
 *      ereport. Note that the reason we don't recover
 *      at TL>0 is because the AGs might be corrupted or
 *      inconsistent. We can't save/restore them into
 *      the scratchpad regs like we did for opl_sync_trap().
 *    - Check the INSTEND[5:4] bits in the UGERSR. If the
 *      value is 0x3 (11b), this error is not recoverable.
 *      Generate ereport.
 *    - Subtract one from the recoverable error count stored in
 *      the error log scratch register. If the threshold limit
 *      is reached (zero) - generate ereport.
 *    - If the count is within the limit, update the count
 *      in the error log register (subtract one). Log the error
 *      info in the log buffer. Capture traptrace if enabled.
 *      Retry (no ereport generated)
 * 4. The rest of the error cases are unrecoverable and will
 *    be handled accordingly (flushing regs, etc. as required).
 *    For details on these error cases (UGER_CRE, UGER_CTXT, etc..)
 *    consult the OPL cpu/mem philosophy doc.
 *    Ereport will be generated for these errors.
 * 5. Ereport generation.
 *    - Ereport generation for the urgent error trap always
 *      results in a panic when we unwind to the TL=0 handling
 *      code via sys_trap(). on_trap()/lofault protection does
 *      not apply there.
 */
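/*
 * The recoverability test of steps 2 and 3, as a hedged C sketch
 * (illustrative names, not a real interface):
 *
 *	int
 *	recoverable(uint64_t ugersr, int tl, uint64_t errlog)
 *	{
 *		if (ugersr & UGESR_MULTI)
 *			return (0);
 *		if (!(ugersr & UGESR_CAN_RECOVER))
 *			return (0);
 *		if (tl != 1)
 *			return (0);
 *		if (((ugersr >> 4) & 3) == 3)
 *			return (0);
 *		return ((errlog & ERRLOG_REG_NUMERR_MASK) - 1 != 0);
 *	}
 */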
	ENTRY_NP(opl_uger_trap)
	set	ASI_UGERSR, %g2
	ldxa	[%g2]ASI_AFSR, %g1		! Read the UGERSR reg

	set	UGESR_MULTI, %g2
	andcc	%g1, %g2, %g0			! Check for Multi-errs
	bz,pt	%xcc, opl_uger_is_recover	! branch if not Multi-errs
	 nop
	set	AFSR_ECR, %g2
	ldxa	[%g2]ASI_AFSR, %g3		! Enable Weak error
	or	%g3, ASI_ECR_WEAK_ED, %g3	! detect mode to prevent
	stxa	%g3, [%g2]ASI_AFSR		! potential error storms
	ba	%xcc, opl_uger_panic1
	 nop

opl_uger_is_recover:
	set	UGESR_CAN_RECOVER, %g2		! Check for recoverable
	andcc	%g1, %g2, %g0			! errors i.e.IUG_DTLB,
	bz,pt	%xcc, opl_uger_cre		! IUG_ITLB or COREERR
	 nop

	/*
	 * Fall thru to handle recoverable case
	 * Need to do the following additional checks to determine
	 * if this is indeed recoverable.
	 * 1. Error trap came from TL=0 and
	 * 2. INSTEND[5:4] bits in UGERSR are not 0x3
	 * 3. Recoverable error count limit not reached
	 *
	 */
	FLUSH_ALL_TLB(%g3)
	rdpr	%tl, %g3		! Read TL
	cmp	%g3, 1			! Check if we came from TL=0
	bne,pt	%xcc, opl_uger_panic	! branch if came from TL>0
	 nop
	srlx	%g1, 4, %g2		! shift INSTEND[5:4] -> [1:0]
	and	%g2, 3, %g2		! extract the shifted [1:0] bits
	cmp	%g2, 3			! check if INSTEND is recoverable
	be,pt   %xcc, opl_uger_panic	! panic if ([1:0] = 11b)
	 nop
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2		! Read errlog scratch reg
	and	%g2, ERRLOG_REG_NUMERR_MASK, %g3! Extract error count and
	subcc	%g3, 1, %g3			! subtract one from it
	bz,pt   %xcc, opl_uger_panic	! If count reached zero, too many
	 nop				! errors, branch to generate ereport
	sub	%g2, 1, %g2			! Subtract one from the count
	set	OPL_SCRATCHPAD_ERRLOG, %g3	! and write back the updated
	stxa	%g2, [%g3]ASI_SCRATCHPAD		! count into the errlog reg
	LOG_UGER_REG(%g1, %g2, %g3)		! Log the error info
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_uger_trap_lb)
#endif	/* TRAPTRACE */
	retry					! retry - no ereport

	/*
	 * Process the rest of the unrecoverable error cases
	 * All error cases below ultimately branch to either
	 * opl_uger_panic or opl_uger_panic1.
	 * opl_uger_panic1 is the same as opl_uger_panic except
	 * for the additional execution of the RESET_TO_PRIV()
	 * macro that does a heavy handed reset. Read the
	 * comments for RESET_TO_PRIV() macro for more info.
	 */
opl_uger_cre:
	set	UGESR_IAUG_CRE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_ctxt
	 nop
	IAG_CRE(%g2, %g3)
	set	AFSR_ECR, %g2
	ldxa	[%g2]ASI_AFSR, %g3
	or	%g3, ASI_ECR_WEAK_ED, %g3
	stxa	%g3, [%g2]ASI_AFSR
	ba	%xcc, opl_uger_panic
	 nop

opl_uger_ctxt:
	set	UGESR_IAUG_TSBCTXT, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_tsbp
	 nop
	RESET_MMU_REGS(%g2, %g3, %g4)
	ba	%xcc, opl_uger_panic
	 nop

opl_uger_tsbp:
	set	UGESR_IUG_TSBP, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_pstate
	 nop
	RESET_TSB_TAGPTR(%g2)
	ba	%xcc, opl_uger_panic
	 nop

opl_uger_pstate:
	set	UGESR_IUG_PSTATE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_tstate
	 nop
	RESET_CUR_TSTATE(%g2)
	ba	%xcc, opl_uger_panic1
	 nop

opl_uger_tstate:
	set	UGESR_IUG_TSTATE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_f
	 nop
	RESET_PREV_TSTATE(%g2, %g3, opl_uger_tstate_1)
	ba	%xcc, opl_uger_panic1
	 nop

opl_uger_f:
	set	UGESR_IUG_F, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_r
	 nop
	CLEAR_FPREGS(%g2)
	ba	%xcc, opl_uger_panic
	 nop

opl_uger_r:
	set	UGESR_IUG_R, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_panic1
	 nop
	CLEAR_GEN_REGS(%g2, opl_uger_r_1)
	ba	%xcc, opl_uger_panic1
	 nop

opl_uger_panic:
	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	ba	%xcc, opl_uger_panic_cmn
	 nop

opl_uger_panic1:
	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	RESET_TO_PRIV(%g1, %g3, %g4, %l0)

	/*
	 * Set up the argument for sys_trap.
	 * %g2 = arg #1 already set above
	 */
opl_uger_panic_cmn:
	rdpr	%tl, %g3			! arg #2
	set	opl_cpu_urgent_error, %g1	! pc
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	 mov	PIL_15, %g4
	SET_SIZE(opl_uger_trap)
#endif	/* lint */

#if defined(lint)

void
opl_serr_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x0a, and tt=0x32
 */
	ENTRY_NP(opl_serr_instr)
	OPL_SAVE_GLOBAL(%g1,%g2,%g3)
	sethi   %hi(opl_sync_trap), %g3
	jmp	%g3 + %lo(opl_sync_trap)
	 rdpr    %tt, %g1
	.align  32
	SET_SIZE(opl_serr_instr)

#endif	/* lint */

#if defined(lint)

void
opl_ugerr_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x40
 */
	ENTRY_NP(opl_ugerr_instr)
	sethi   %hi(opl_uger_trap), %g3
	jmp	%g3 + %lo(opl_uger_trap)
	 nop
	.align  32
	SET_SIZE(opl_ugerr_instr)

#endif	/* lint */

#if defined(lint)
/*
 *  Get timestamp (stick).
 */
/* ARGSUSED */
void
stick_timestamp(int64_t *ts)
{
}

#else	/* lint */

	ENTRY_NP(stick_timestamp)
	rd	STICK, %g1	! read stick reg
	sllx	%g1, 1, %g1
	srlx	%g1, 1, %g1	! clear npt bit

	retl
	stx	%g1, [%o0]	! store the timestamp
	SET_SIZE(stick_timestamp)

#endif	/* lint */
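
/*
 * The shift pair above just masks off the top (NPT) bit; as a hedged
 * C sketch, with read_stick() standing in for the rd STICK instruction:
 *
 *	uint64_t s = read_stick();
 *	*ts = (int64_t)((s << 1) >> 1);
 */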


#if defined(lint)
/*
 * Set STICK adjusted by skew.
 */
/* ARGSUSED */
void
stick_adj(int64_t skew)
{
}

#else	/* lint */

	ENTRY_NP(stick_adj)
	rdpr	%pstate, %g1		! save processor state
	andn	%g1, PSTATE_IE, %g3
	ba	1f			! cache align stick adj
	wrpr	%g0, %g3, %pstate	! turn off interrupts

	.align	16
1:	nop

	rd	STICK, %g4		! read stick reg
	add	%g4, %o0, %o1		! adjust stick with skew
	wr	%o1, %g0, STICK		! write stick reg

	retl
	wrpr	%g1, %pstate		! restore processor state
	SET_SIZE(stick_adj)

#endif	/* lint */

#if defined(lint)
/*
 * Debugger-specific stick retrieval
 */
/*ARGSUSED*/
int
kdi_get_stick(uint64_t *stickp)
{
	return (0);
}

#else	/* lint */

	ENTRY_NP(kdi_get_stick)
	rd	STICK, %g1
	stx	%g1, [%o0]
	retl
	mov	%g0, %o0
	SET_SIZE(kdi_get_stick)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
{ return (0); }

#else

	ENTRY(dtrace_blksuword32)
	save	%sp, -SA(MINFRAME + 4), %sp

	rdpr	%pstate, %l1
	andn	%l1, PSTATE_IE, %l2		! disable interrupts to
	wrpr	%g0, %l2, %pstate		! protect our FPU diddling

	rd	%fprs, %l0
	andcc	%l0, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f			! if the fpu is disabled
	wr	%g0, FPRS_FEF, %fprs		! ... enable the fpu

	st	%f0, [%fp + STACK_BIAS - 4]	! save %f0 to the stack
1:
	set	0f, %l5
	/*
	 * We're about to write a block full of either total garbage
	 * (not kernel data, don't worry) or user floating-point data
	 * (so it only _looks_ like garbage).
	 */
	ld	[%i1], %f0			! modify the block
	membar	#Sync
	stn	%l5, [THREAD_REG + T_LOFAULT]	! set up the lofault handler
	stda	%d0, [%i0]ASI_BLK_COMMIT_S	! store the modified block
	membar	#Sync
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	ret
	restore	%g0, %g0, %o0

0:
	membar	#Sync
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	/*
	 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
	 * which deals with watchpoints. Otherwise, just return -1.
	 */
	brnz,pt	%i2, 1f
	nop
	ret
	restore	%g0, -1, %o0
1:
	call	dtrace_blksuword32_err
	restore

	SET_SIZE(dtrace_blksuword32)
#endif /* lint */

#if defined(lint)
/*ARGSUSED*/
void
ras_cntr_reset(void *arg)
{
}
#else
	ENTRY_NP(ras_cntr_reset)
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	ldxa	[%o1]ASI_SCRATCHPAD, %o0
	or	%o0, ERRLOG_REG_NUMERR_MASK, %o0
	retl
	 stxa	%o0, [%o1]ASI_SCRATCHPAD
	SET_SIZE(ras_cntr_reset)
#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
opl_error_setup(uint64_t cpu_err_log_pa)
{
}

#else	/* lint */
	ENTRY_NP(opl_error_setup)
	/*
	 * Initialize the error log scratchpad register
	 */
	ldxa	[%g0]ASI_EIDR, %o2
	sethi	%hi(ERRLOG_REG_EIDR_MASK), %o1
	or	%o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
	and	%o2, %o1, %o3
	sllx	%o3, ERRLOG_REG_EIDR_SHIFT, %o2
	or	%o2, %o0, %o3
	or	%o3, ERRLOG_REG_NUMERR_MASK, %o0
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	stxa	%o0, [%o1]ASI_SCRATCHPAD
	/*
	 * Disable all restrainable error traps
	 */
	mov	AFSR_ECR, %o1
	ldxa	[%o1]ASI_AFSR, %o0
	andn	%o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
	retl
	  stxa	%o0, [%o1]ASI_AFSR
	SET_SIZE(opl_error_setup)
#endif /* lint */
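
/*
 * The scratchpad value composed above packs three fields; a rough C
 * sketch of the composition (layout per the ERRLOG_REG_* macros):
 *
 *	uint64_t errlog = ((eidr & ERRLOG_REG_EIDR_MASK) <<
 *	    ERRLOG_REG_EIDR_SHIFT) | cpu_err_log_pa |
 *	    ERRLOG_REG_NUMERR_MASK;
 *
 * i.e. the CPU's EIDR in the high field, the physical address of the
 * per-cpu log buffer in the middle, and the recoverable-error counter
 * initialized to its maximum in the low bits.
 */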

#if defined(lint)
/* ARGSUSED */
void
opl_mpg_enable(void)
{
}
#else	/* lint */
	ENTRY_NP(opl_mpg_enable)
	/*
	 * Enable the MMU to translate multiple page sizes for
	 * the sITLB and sDTLB.
	 */
	mov	LSU_MCNTL, %o0
	ldxa	[%o0] ASI_MCNTL, %o1
	or	%o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
	retl
	  stxa	%o1, [%o0] ASI_MCNTL
	SET_SIZE(opl_mpg_enable)
#endif /* lint */

#if	defined(lint)
/*
 * This function is called for each (enabled) CPU. We use it to
 * initialize error handling related registers.
 */
/*ARGSUSED*/
void
cpu_feature_init(void)
{}
#else	/* lint */
	ENTRY(cpu_feature_init)
	!
	! get the device_id and store the device_id
	! in the appropriate cpunodes structure
	! given the cpu's index
	!
	CPU_INDEX(%o0, %o1)
	mulx %o0, CPU_NODE_SIZE, %o0
	set  cpunodes + DEVICE_ID, %o1
	ldxa [%g0] ASI_DEVICE_SERIAL_ID, %o2
	stx  %o2, [%o0 + %o1]
	!
	! initialize CPU registers
	!
	ba	opl_cpu_reg_init
	nop
	SET_SIZE(cpu_feature_init)
#endif	/* lint */

#if defined(lint)

void
cpu_clearticknpt(void)
{}

#else	/* lint */
	/*
	 * Clear the NPT (non-privileged trap) bit in the %tick/%stick
	 * registers. In an effort to make the change in the
	 * tick/stick counter as consistent as possible, we disable
	 * all interrupts while we're changing the registers. We also
	 * ensure that the read and write instructions are in the same
	 * line in the instruction cache.
	 */
	ENTRY_NP(cpu_clearticknpt)
	rdpr	%pstate, %g1		/* save processor state */
	andn	%g1, PSTATE_IE, %g3	/* turn off */
	wrpr	%g0, %g3, %pstate	/*   interrupts */
	rdpr	%tick, %g2		/* get tick register */
	brgez,pn %g2, 1f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/*   for NPT bit */
	ba,a,pt	%xcc, 2f
	.align	8			/* Ensure rd/wr in same i$ line */
2:
	rdpr	%tick, %g2		/* get tick register */
	wrpr	%g3, %g2, %tick		/* write tick register, */
					/*   clearing NPT bit   */
1:
	rd	STICK, %g2		/* get stick register */
	brgez,pn %g2, 3f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/*   for NPT bit */
	ba,a,pt	%xcc, 4f
	.align	8			/* Ensure rd/wr in same i$ line */
4:
	rd	STICK, %g2		/* get stick register */
	wr	%g3, %g2, STICK		/* write stick register, */
					/*   clearing NPT bit   */
3:
	jmp	%g4 + 4
	wrpr	%g0, %g1, %pstate	/* restore processor state */

	SET_SIZE(cpu_clearticknpt)

#endif	/* lint */

#if defined(lint)

void
cpu_halt_cpu(void)
{}

void
cpu_smt_pause(void)
{}

#else	/* lint */

	/*
	 * Halt the current strand with the suspend instruction.
	 * The compiler/asm currently does not support this suspend
	 * instruction mnemonic, use byte code for now.
	 */
	ENTRY_NP(cpu_halt_cpu)
	.word   0x81b01040
	retl
	nop
	SET_SIZE(cpu_halt_cpu)

	/*
	 * Pause the current strand with the sleep instruction.
	 * The compiler/asm currently does not support this sleep
	 * instruction mnemonic, use byte code for now.
	 */
	ENTRY_NP(cpu_smt_pause)
	.word   0x81b01060
	retl
	nop
	SET_SIZE(cpu_smt_pause)

#endif	/* lint */