/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Olympus-C module
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/trap.h>
#include <sys/opl_olympus_regs.h>
#include <sys/opl_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cmpregs.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

/*
 * Macro that flushes the entire Ecache.
 *
 * arg1 = ecache size (clobbered; used as scratch for the flush command)
 * arg2 = ecache linesize (clobbered; used as scratch for the ASI address)
 * arg3 = ecache flush address - Not used for olympus-C
 */
#define	ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1)				\
	mov	ASI_L2_CTRL_U2_FLUSH, arg1;				\
	mov	ASI_L2_CTRL_RW_ADDR, arg2;				\
	stxa	arg1, [arg2]ASI_L2_CTRL

/*
 * SPARC64-VI MMU and Cache operations.
 */

#if defined(lint)

/* ARGSUSED */
void
vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushpage)
	/*
	 * flush page from the tlb
	 *
	 * %o0 = vaddr
	 * %o1 = sfmmup
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, opl_di_l3, %g1)
#endif /* DEBUG */
	/*
	 * disable ints
	 */
	andn	%o5, PSTATE_IE, %o4
	wrpr	%o4, 0, %pstate

	/*
	 * Then, blow out the tlb
	 * Interrupts are disabled to prevent the primary ctx register
	 * from changing underneath us.
	 */
	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o1
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	  sethi	%hi(FLUSH_ADDR), %o3
	/*
	 * For Kernel demaps use primary. type = page implicitly
	 */
	stxa	%g0, [%o0]ASI_DTLB_DEMAP	/* dmmu flush for KCONTEXT */
	stxa	%g0, [%o0]ASI_ITLB_DEMAP	/* immu flush for KCONTEXT */
	flush	%o3
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1:
	/*
	 * User demap.  We need to set the primary context properly.
	 * Secondary context cannot be used for SPARC64-VI IMMU.
	 * %o0 = vaddr
	 * %o1 = sfmmup
	 * %o3 = FLUSH_ADDR
	 */
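	/*
	 * A C-level sketch of the sequence below (illustrative only,
	 * not an architectural definition):
	 *
	 *	old = pcontext;
	 *	pcontext = (old nucleus pgsz bits) | cext pgsz | cnum;
	 *	dtlb_demap(vaddr); itlb_demap(vaddr);
	 *	pcontext = old;
	 */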
	SFMMU_CPU_CNUM(%o1, %g1, %g2)		! %g1 = sfmmu cnum on this CPU

	ldub	[%o1 + SFMMU_CEXT], %o4		! %o4 = sfmmup->sfmmu_cext
	sll	%o4, CTXREG_EXT_SHIFT, %o4
	or	%g1, %o4, %g1			! %g1 = primary pgsz | cnum

	wrpr	%g0, 1, %tl
	set	MMU_PCONTEXT, %o4
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
	ldxa	[%o4]ASI_DMMU, %o2		! %o2 = save old ctxnum
	srlx	%o2, CTXREG_NEXT_SHIFT, %o1	! need to preserve nucleus pgsz
	sllx	%o1, CTXREG_NEXT_SHIFT, %o1	! %o1 = nucleus pgsz
	or	%g1, %o1, %g1			! %g1 = nucleus pgsz | primary pgsz | cnum
	stxa	%g1, [%o4]ASI_DMMU		! wr new ctxnum

	stxa	%g0, [%o0]ASI_DTLB_DEMAP
	stxa	%g0, [%o0]ASI_ITLB_DEMAP
	stxa	%o2, [%o4]ASI_DMMU		/* restore old ctxnum */
	flush	%o3
	wrpr	%g0, 0, %tl

	retl
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(vtag_flushpage)

#endif	/* lint */


#if defined(lint)

void
vtag_flushall(void)
{}

#else	/* lint */

	ENTRY_NP2(vtag_flushall, demap_all)
	/*
	 * flush the tlb
	 */
	sethi	%hi(FLUSH_ADDR), %o3
	set	DEMAP_ALL_TYPE, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%o3
	retl
	nop
	SET_SIZE(demap_all)
	SET_SIZE(vtag_flushall)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushpage_tl1)
	/*
	 * x-trap to flush page from tlb and tsb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = sfmmup
	 *
	 * assumes TSBE_TAG = 0
	 */
	srln	%g1, MMU_PAGESHIFT, %g1

	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g3, %g2
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	retry
1:
	/* We need to demap in a user context */
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	SFMMU_CPU_CNUM(%g2, %g6, %g3)	! %g6 = sfmmu cnum on this CPU

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g6, %g4, %g6			! %g6 = primary pgsz | cnum

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g5		! %g5 = save old ctxnum
	srlx	%g5, CTXREG_NEXT_SHIFT, %g2	! %g2 = nucleus pgsz
	sllx	%g2, CTXREG_NEXT_SHIFT, %g2	! preserve nucleus pgsz
	or	%g6, %g2, %g6			! %g6 = nucleus pgsz | primary pgsz | cnum
	stxa	%g6, [%g4]ASI_DMMU		! wr new ctxnum
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	stxa	%g5, [%g4]ASI_DMMU		! restore old ctxnum
	retry
	SET_SIZE(vtag_flushpage_tl1)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
{}

#else	/* lint */

	ENTRY_NP(vtag_flush_pgcnt_tl1)
	/*
	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = <sfmmup58|pgcnt6>
	 *
	 * NOTE: this handler relies on the fact that no
	 *	interrupts or traps can occur during the loop
	 *	issuing the TLB_DEMAP operations. It is assumed
	 *	that interrupts are disabled and this code is
	 *	fetched from locked kernel text.
	 *
	 * assumes TSBE_TAG = 0
	 */
	set	SFMMU_PGCNT_MASK, %g4
	and	%g4, %g2, %g3			/* g3 = pgcnt - 1 */
	add	%g3, 1, %g3			/* g3 = pgcnt */

	andn	%g2, SFMMU_PGCNT_MASK, %g2	/* g2 = sfmmup */
	srln	%g1, MMU_PAGESHIFT, %g1

	sethi	%hi(ksfmmup), %g4
	ldx	[%g4 + %lo(ksfmmup)], %g4
	cmp	%g4, %g2
	bne,pn	%xcc, 1f			/* if not kernel as, go to 1 */
	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5
4:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc,4b
	  add	%g1, %g2, %g1			/* next page */
	retry
1:
	/*
	 * We need to demap in a user context
	 *
	 * g2 = sfmmup
	 * g3 = pgcnt
	 */
	SFMMU_CPU_CNUM(%g2, %g5, %g6)		! %g5 = sfmmu cnum on this CPU

	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g5, %g4, %g5

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g6		/* rd old ctxnum */
	srlx	%g6, CTXREG_NEXT_SHIFT, %g2	/* %g2 = nucleus pgsz */
	sllx	%g2, CTXREG_NEXT_SHIFT, %g2	/* preserve nucleus pgsz */
	or	%g5, %g2, %g5			/* %g5 = nucleus pgsz | primary pgsz | cnum */
	stxa	%g5, [%g4]ASI_DMMU		/* wr new ctxnum */

	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5
3:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc,3b
	  add	%g1, %g2, %g1			/* next page */

	stxa	%g6, [%g4]ASI_DMMU		/* restore old ctxnum */
	retry
	SET_SIZE(vtag_flush_pgcnt_tl1)

#endif	/* lint */


#if defined(lint)

/*ARGSUSED*/
void
vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushall_tl1)
	/*
	 * x-trap to flush tlb
	 */
	set	DEMAP_ALL_TYPE, %g4
	stxa	%g0, [%g4]ASI_DTLB_DEMAP
	stxa	%g0, [%g4]ASI_ITLB_DEMAP
	retry
	SET_SIZE(vtag_flushall_tl1)

#endif	/* lint */


/*
 * VAC (virtual address cache) conflicts do not apply to OPL.
 * VAC resolution is managed by the Olympus processor hardware.
 * As a result, all OPL VAC flushing routines are no-ops.
 */

#if defined(lint)

/* ARGSUSED */
void
vac_flushpage(pfn_t pfnum, int vcolor)
{}

#else	/* lint */

	ENTRY(vac_flushpage)
	retl
	  nop
	SET_SIZE(vac_flushpage)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
{}

#else	/* lint */

	ENTRY_NP(vac_flushpage_tl1)
	retry
	SET_SIZE(vac_flushpage_tl1)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
vac_flushcolor(int vcolor, pfn_t pfnum)
{}

#else	/* lint */

	ENTRY(vac_flushcolor)
	retl
	 nop
	SET_SIZE(vac_flushcolor)

#endif	/* lint */



#if defined(lint)

/* ARGSUSED */
void
vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)
{}

#else	/* lint */

	ENTRY(vac_flushcolor_tl1)
	retry
	SET_SIZE(vac_flushcolor_tl1)

#endif	/* lint */

#if defined(lint)

int
idsr_busy(void)
{
	return (0);
}

#else	/* lint */

/*
 * Determine whether or not the IDSR is busy.
 * Entry: no arguments
 * Returns: 1 if busy, 0 otherwise
 */
	ENTRY(idsr_busy)
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	clr	%o0
	btst	IDSR_BUSY, %g1
	bz,a,pt	%xcc, 1f
	mov	1, %o0
1:
	retl
	nop
	SET_SIZE(idsr_busy)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

/* ARGSUSED */
void
init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

#else	/* lint */

	.global _dispatch_status_busy
_dispatch_status_busy:
	.asciz	"ASI_INTR_DISPATCH_STATUS error: busy"
	.align	4

/*
 * Setup interrupt dispatch data registers
 * Entry:
 *	%o0 - function or inumber to call
 *	%o1, %o2 - arguments (2 uint64_t's)
 */
	.seg "text"

	ENTRY(init_mondo)
#ifdef DEBUG
	!
	! IDSR should not be busy at the moment
	!
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	btst	IDSR_BUSY, %g1
	bz,pt	%xcc, 1f
	nop
	sethi	%hi(_dispatch_status_busy), %o0
	call	panic
	or	%o0, %lo(_dispatch_status_busy), %o0
#endif /* DEBUG */

	ALTENTRY(init_mondo_nocheck)
	!
	! interrupt vector dispatch data reg 0
	!
1:
	mov	IDDR_0, %g1
	mov	IDDR_1, %g2
	mov	IDDR_2, %g3
	stxa	%o0, [%g1]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 1
	!
	stxa	%o1, [%g2]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 2
	!
	stxa	%o2, [%g3]ASI_INTR_DISPATCH

	membar	#Sync
	retl
	nop
	SET_SIZE(init_mondo_nocheck)
	SET_SIZE(init_mondo)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
shipit(int upaid, int bn)
{ return; }

#else	/* lint */

/*
 * Ship the mondo to the target CPU (upaid) using busy/nack pair bn
 */
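/*
 * A C-level sketch of the dispatch performed below (illustrative only;
 * stxa() is pseudo-notation for the ASI store, not a real function):
 *
 *	idcr = (upaid << IDCR_PID_SHIFT) | (bn << IDCR_BN_SHIFT) |
 *	    IDCR_OFFSET;
 *	stxa(0, idcr, ASI_INTR_DISPATCH);	-- triggers the send
 */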
	ENTRY_NP(shipit)
	sll	%o0, IDCR_PID_SHIFT, %g1	! IDCR<23:14> = agent id
	sll	%o1, IDCR_BN_SHIFT, %g2		! IDCR<28:24> = b/n pair
	or	%g1, IDCR_OFFSET, %g1		! IDCR<13:0> = 0x70
	or	%g1, %g2, %g1
	stxa	%g0, [%g1]ASI_INTR_DISPATCH	! interrupt vector dispatch
	membar	#Sync
	retl
	nop
	SET_SIZE(shipit)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
flush_instr_mem(caddr_t vaddr, size_t len)
{}

#else	/* lint */

/*
 * flush_instr_mem:
 *	Flush 1 page of the I-$ starting at vaddr
 *	%o0 vaddr
 *	%o1 bytes to be flushed
 *
 * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
 * the stores from all processors, so a FLUSH instruction is only needed
 * to ensure the pipeline is consistent. This means a single flush is
 * sufficient at the end of a sequence of stores that updates the
 * instruction stream to ensure correct operation.
 */

	ENTRY(flush_instr_mem)
	flush	%o0			! address irrelevant
	retl
	nop
	SET_SIZE(flush_instr_mem)

#endif	/* lint */


/*
 * flush_ecache:
 *	%o0 - 64 bit physical address
 *	%o1 - ecache size
 *	%o2 - ecache linesize
 */
#if defined(lint)

/*ARGSUSED*/
void
flush_ecache(uint64_t physaddr, size_t ecache_size, size_t ecache_linesize)
{}

#else /* !lint */

	ENTRY(flush_ecache)

	/*
	 * Flush the entire Ecache.
	 */
	ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)
	retl
	nop
	SET_SIZE(flush_ecache)

#endif /* lint */

#if defined(lint)

/*ARGSUSED*/
void
kdi_flush_idcache(int dcache_size, int dcache_lsize, int icache_size,
    int icache_lsize)
{
}

#else	/* lint */

	/*
	 * I/D cache flushing is not needed for OPL processors
	 */
	ENTRY(kdi_flush_idcache)
	retl
	nop
	SET_SIZE(kdi_flush_idcache)

#endif	/* lint */

#ifdef	TRAPTRACE
/*
 * Simplified trap trace macro for OPL. Adapted from us3.
 */
#define	OPL_TRAPTRACE(ptr, scr1, scr2, label)			\
	CPU_INDEX(scr1, ptr);					\
	sll	scr1, TRAPTR_SIZE_SHIFT, scr1;			\
	set	trap_trace_ctl, ptr;				\
	add	ptr, scr1, scr1;				\
	ld	[scr1 + TRAPTR_LIMIT], ptr;			\
	tst	ptr;						\
	be,pn	%icc, label/**/1;				\
	 ldx	[scr1 + TRAPTR_PBASE], ptr;			\
	ld	[scr1 + TRAPTR_OFFSET], scr1;			\
	add	ptr, scr1, ptr;					\
	rd	%asi, scr2;					\
	wr	%g0, TRAPTR_ASI, %asi;				\
	rd	STICK, scr1;					\
	stxa	scr1, [ptr + TRAP_ENT_TICK]%asi;		\
	rdpr	%tl, scr1;					\
	stha	scr1, [ptr + TRAP_ENT_TL]%asi;			\
	rdpr	%tt, scr1;					\
	stha	scr1, [ptr + TRAP_ENT_TT]%asi;			\
	rdpr	%tpc, scr1;					\
	stna	scr1, [ptr + TRAP_ENT_TPC]%asi;			\
	rdpr	%tstate, scr1;					\
	stxa	scr1, [ptr + TRAP_ENT_TSTATE]%asi;		\
	stna	%sp, [ptr + TRAP_ENT_SP]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_TR]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F1]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F2]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F3]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F4]%asi;			\
	wr	%g0, scr2, %asi;				\
	CPU_INDEX(ptr, scr1);					\
	sll	ptr, TRAPTR_SIZE_SHIFT, ptr;			\
	set	trap_trace_ctl, scr1;				\
	add	scr1, ptr, ptr;					\
	ld	[ptr + TRAPTR_OFFSET], scr1;			\
	ld	[ptr + TRAPTR_LIMIT], scr2;			\
	st	scr1, [ptr + TRAPTR_LAST_OFFSET];		\
	add	scr1, TRAP_ENT_SIZE, scr1;			\
	sub	scr2, TRAP_ENT_SIZE, scr2;			\
	cmp	scr1, scr2;					\
	movge	%icc, 0, scr1;					\
	st	scr1, [ptr + TRAPTR_OFFSET];			\
label/**/1:
#endif	/* TRAPTRACE */



/*
 * Macros facilitating error handling.
 */

/*
 * Save alternative global registers reg1, reg2, reg3
 * to scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_SAVE_GLOBAL(reg1, reg2, reg3)	\
	stxa	reg1, [%g0]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1	;\
	stxa	reg2, [reg1]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1	;\
	stxa	reg3, [reg1]ASI_SCRATCHPAD

/*
 * Restore alternative global registers reg1, reg2, reg3
 * from scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_RESTORE_GLOBAL(reg1, reg2, reg3)			\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1			;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg3			;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1			;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg2			;\
	ldxa	[%g0]ASI_SCRATCHPAD, reg1

/*
 * Logs value `val' into the member `offset' of a structure
 * at physical address `pa'
 */
#define	LOG_REG(pa, offset, val)				\
	add	pa, offset, pa					;\
	stxa	val, [pa]ASI_MEM

#define	FLUSH_ALL_TLB(tmp1)					\
	set	DEMAP_ALL_TYPE, tmp1				;\
	stxa	%g0, [tmp1]ASI_ITLB_DEMAP			;\
	stxa	%g0, [tmp1]ASI_DTLB_DEMAP			;\
	sethi	%hi(FLUSH_ADDR), tmp1				;\
	flush	tmp1

/*
 * Extracts the Physaddr to Logging Buffer field of the OPL_SCRATCHPAD_ERRLOG
 * scratch register by zeroing all other fields. Result is in pa.
 */
#define	LOG_ADDR(pa)							\
	mov	OPL_SCRATCHPAD_ERRLOG, pa				;\
	ldxa	[pa]ASI_SCRATCHPAD, pa					;\
	sllx	pa, 64-ERRLOG_REG_EIDR_SHIFT, pa			;\
	srlx	pa, 64-ERRLOG_REG_EIDR_SHIFT+ERRLOG_REG_ERR_SHIFT, pa	;\
	sllx	pa, ERRLOG_REG_ERR_SHIFT, pa
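
/*
 * In C terms the shift pair above is a field mask; a sketch, using the
 * same *_SHIFT constants as the macro:
 *
 *	pa = errlog & (((1ULL << ERRLOG_REG_EIDR_SHIFT) - 1) &
 *	    ~((1ULL << ERRLOG_REG_ERR_SHIFT) - 1));
 *
 * i.e. keep only bits [ERRLOG_REG_EIDR_SHIFT - 1 : ERRLOG_REG_ERR_SHIFT],
 * which hold the physical address of the logging buffer.
 */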

/*
 * Advance the per-cpu error log buffer pointer to the next
 * ERRLOG_SZ entry, making sure that it wraps around at the
 * ERRLOG_BUFSZ boundary. The args logpa, bufmask and tmp are
 * scratch registers for this macro.
 *
 * Algorithm:
 * 1. logpa = contents of errorlog scratchpad register
 * 2. bufmask = ERRLOG_BUFSZ - 1
 * 3. tmp = logpa & ~(bufmask)     (tmp is now logbase)
 * 4. logpa += ERRLOG_SZ
 * 5. logpa = logpa & bufmask      (get new offset to logbase)
 * 6. logpa = tmp | logpa
 * 7. write logpa back into errorlog scratchpad register
 *
 * new logpa = (logpa & ~bufmask) | ((logpa + ERRLOG_SZ) & bufmask)
 *
 */
#define	UPDATE_LOGADD(logpa, bufmask, tmp)			\
	set	OPL_SCRATCHPAD_ERRLOG, tmp			;\
	ldxa	[tmp]ASI_SCRATCHPAD, logpa			;\
	set	(ERRLOG_BUFSZ-1), bufmask			;\
	andn	logpa, bufmask, tmp				;\
	add	logpa, ERRLOG_SZ, logpa				;\
	and	logpa, bufmask, logpa				;\
	or	tmp, logpa, logpa				;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp			;\
	stxa	logpa, [tmp]ASI_SCRATCHPAD

/* Log error status registers into the log buffer */
#define	LOG_SYNC_REG(sfsr, sfar, tmp)				\
	LOG_ADDR(tmp)						;\
	LOG_REG(tmp, LOG_SFSR_OFF, sfsr)			;\
	LOG_ADDR(tmp)						;\
	mov	tmp, sfsr					;\
	LOG_REG(tmp, LOG_SFAR_OFF, sfar)			;\
	rd	STICK, sfar					;\
	mov	sfsr, tmp					;\
	LOG_REG(tmp, LOG_STICK_OFF, sfar)			;\
	rdpr	%tl, tmp					;\
	sllx	tmp, 32, sfar					;\
	rdpr	%tt, tmp					;\
	or	sfar, tmp, sfar					;\
	mov	sfsr, tmp					;\
	LOG_REG(tmp, LOG_TL_OFF, sfar)				;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp			;\
	ldxa	[tmp]ASI_SCRATCHPAD, sfar			;\
	mov	sfsr, tmp					;\
	LOG_REG(tmp, LOG_ASI3_OFF, sfar)			;\
	rdpr	%tpc, sfar					;\
	mov	sfsr, tmp					;\
	LOG_REG(tmp, LOG_TPC_OFF, sfar)				;\
	UPDATE_LOGADD(sfsr, sfar, tmp)

#define	LOG_UGER_REG(uger, tmp, tmp2)				\
	LOG_ADDR(tmp)						;\
	mov	tmp, tmp2					;\
	LOG_REG(tmp2, LOG_UGER_OFF, uger)			;\
	mov	tmp, uger					;\
	rd	STICK, tmp2					;\
	LOG_REG(tmp, LOG_STICK_OFF, tmp2)			;\
	rdpr	%tl, tmp					;\
	sllx	tmp, 32, tmp2					;\
	rdpr	%tt, tmp					;\
	or	tmp2, tmp, tmp2					;\
	mov	uger, tmp					;\
	LOG_REG(tmp, LOG_TL_OFF, tmp2)				;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp2			;\
	ldxa	[tmp2]ASI_SCRATCHPAD, tmp2			;\
	mov	uger, tmp					;\
	LOG_REG(tmp, LOG_ASI3_OFF, tmp2)			;\
	rdpr	%tstate, tmp2					;\
	mov	uger, tmp					;\
	LOG_REG(tmp, LOG_TSTATE_OFF, tmp2)			;\
	rdpr	%tpc, tmp2					;\
	mov	uger, tmp					;\
	LOG_REG(tmp, LOG_TPC_OFF, tmp2)				;\
	UPDATE_LOGADD(uger, tmp, tmp2)

/*
 * Scrub the STICK_COMPARE register to clear the error by updating
 * it to a reasonable value for interrupt generation.
 * Ensure that we observe the CPU_ENABLE flag so that we
 * don't accidentally enable the TICK interrupt in STICK_COMPARE,
 * i.e. no clock interrupt will be generated if the CPU_ENABLE flag
 * is off.
 */
#define	UPDATE_STICK_COMPARE(tmp1, tmp2)			\
	CPU_ADDR(tmp1, tmp2)					;\
	lduh	[tmp1 + CPU_FLAGS], tmp2			;\
	andcc	tmp2, CPU_ENABLE, %g0				;\
	set	OPL_UGER_STICK_DIFF, tmp2			;\
	rd	STICK, tmp1					;\
	add	tmp1, tmp2, tmp1				;\
	mov	1, tmp2						;\
	sllx	tmp2, TICKINT_DIS_SHFT, tmp2			;\
	or	tmp1, tmp2, tmp2				;\
	movnz	%xcc, tmp1, tmp2				;\
	wr	tmp2, %g0, STICK_COMPARE
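
/*
 * In C terms, a sketch of the update above (TICKINT_DIS is pseudo-
 * notation for 1ULL << TICKINT_DIS_SHFT):
 *
 *	new = stick + OPL_UGER_STICK_DIFF;
 *	if (!(cpu->cpu_flags & CPU_ENABLE))
 *		new |= TICKINT_DIS;	-- keep the interrupt disabled
 *	stick_compare = new;
 */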

/*
 * Reset registers that may be corrupted by an IAUG_CRE error.
 * To update the interrupt-handling-related registers, force a
 * clock interrupt.
 */
#define	IAG_CRE(tmp1, tmp2)					\
	set	OPL_SCRATCHPAD_ERRLOG, tmp1			;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tmp1			;\
	srlx	tmp1, ERRLOG_REG_EIDR_SHIFT, tmp1		;\
	set	ERRLOG_REG_EIDR_MASK, tmp2			;\
	and	tmp1, tmp2, tmp1				;\
	stxa	tmp1, [%g0]ASI_EIDR				;\
	wr	%g0, 0, SOFTINT					;\
	sethi	%hi(hres_last_tick), tmp1			;\
	ldx	[tmp1 + %lo(hres_last_tick)], tmp1		;\
	set	OPL_UGER_STICK_DIFF, tmp2			;\
	add	tmp1, tmp2, tmp1				;\
	wr	tmp1, %g0, STICK				;\
	UPDATE_STICK_COMPARE(tmp1, tmp2)


#define	CLEAR_FPREGS(tmp)					\
	wr	%g0, FPRS_FEF, %fprs				;\
	wr	%g0, %g0, %gsr					;\
	sethi	%hi(opl_clr_freg), tmp				;\
	or	tmp, %lo(opl_clr_freg), tmp			;\
	ldx	[tmp], %fsr					;\
	fzero	%d0						;\
	fzero	%d2						;\
	fzero	%d4						;\
	fzero	%d6						;\
	fzero	%d8						;\
	fzero	%d10						;\
	fzero	%d12						;\
	fzero	%d14						;\
	fzero	%d16						;\
	fzero	%d18						;\
	fzero	%d20						;\
	fzero	%d22						;\
	fzero	%d24						;\
	fzero	%d26						;\
	fzero	%d28						;\
	fzero	%d30						;\
	fzero	%d32						;\
	fzero	%d34						;\
	fzero	%d36						;\
	fzero	%d38						;\
	fzero	%d40						;\
	fzero	%d42						;\
	fzero	%d44						;\
	fzero	%d46						;\
	fzero	%d48						;\
	fzero	%d50						;\
	fzero	%d52						;\
	fzero	%d54						;\
	fzero	%d56						;\
	fzero	%d58						;\
	fzero	%d60						;\
	fzero	%d62						;\
	wr	%g0, %g0, %fprs

#define	CLEAR_GLOBALS()						\
	mov	%g0, %g1					;\
	mov	%g0, %g2					;\
	mov	%g0, %g3					;\
	mov	%g0, %g4					;\
	mov	%g0, %g5					;\
	mov	%g0, %g6					;\
	mov	%g0, %g7

/*
 * We do not clear the alternative globals here because they
 * are scratch registers, i.e. there is no code that reads them
 * without writing them first. In other words, every read always
 * follows a write, which makes an extra write to the alternative
 * globals unnecessary.
 */
#define	CLEAR_GEN_REGS(tmp1, label)				\
	set	TSTATE_KERN, tmp1				;\
	wrpr	%g0, tmp1, %tstate				;\
	mov	%g0, %y						;\
	mov	%g0, %asi					;\
	mov	%g0, %ccr					;\
	mov	%g0, %l0					;\
	mov	%g0, %l1					;\
	mov	%g0, %l2					;\
	mov	%g0, %l3					;\
	mov	%g0, %l4					;\
	mov	%g0, %l5					;\
	mov	%g0, %l6					;\
	mov	%g0, %l7					;\
	mov	%g0, %i0					;\
	mov	%g0, %i1					;\
	mov	%g0, %i2					;\
	mov	%g0, %i3					;\
	mov	%g0, %i4					;\
	mov	%g0, %i5					;\
	mov	%g0, %i6					;\
	mov	%g0, %i7					;\
	mov	%g0, %o1					;\
	mov	%g0, %o2					;\
	mov	%g0, %o3					;\
	mov	%g0, %o4					;\
	mov	%g0, %o5					;\
	mov	%g0, %o6					;\
	mov	%g0, %o7					;\
	mov	%g0, %o0					;\
	mov	%g0, %g4					;\
	mov	%g0, %g5					;\
	mov	%g0, %g6					;\
	mov	%g0, %g7					;\
	rdpr	%tl, tmp1					;\
	cmp	tmp1, 1						;\
	be,pt	%xcc, label/**/1				;\
	 rdpr	%pstate, tmp1					;\
	wrpr	tmp1, PSTATE_AG|PSTATE_IG, %pstate		;\
	CLEAR_GLOBALS()						;\
	rdpr	%pstate, tmp1					;\
	wrpr	tmp1, PSTATE_IG|PSTATE_MG, %pstate		;\
	CLEAR_GLOBALS()						;\
	rdpr	%pstate, tmp1					;\
	wrpr	tmp1, PSTATE_MG|PSTATE_AG, %pstate		;\
	ba,pt	%xcc, label/**/2				;\
	 nop							;\
label/**/1:							;\
	wrpr	tmp1, PSTATE_AG, %pstate			;\
	CLEAR_GLOBALS()						;\
	rdpr	%pstate, tmp1					;\
	wrpr	tmp1, PSTATE_AG, %pstate			;\
label/**/2:


/*
 * Reset all window related registers
 */
#define	RESET_WINREG(tmp)					\
	sethi	%hi(nwin_minus_one), tmp			;\
	ld	[tmp + %lo(nwin_minus_one)], tmp		;\
	wrpr	%g0, tmp, %cwp					;\
	wrpr	%g0, tmp, %cleanwin				;\
	sub	tmp, 1, tmp					;\
	wrpr	%g0, tmp, %cansave				;\
	wrpr	%g0, %g0, %canrestore				;\
	wrpr	%g0, %g0, %otherwin				;\
	wrpr	%g0, PIL_MAX, %pil				;\
	wrpr	%g0, WSTATE_KERN, %wstate


#define	RESET_PREV_TSTATE(tmp1, tmp2, label)			\
	rdpr	%tl, tmp1					;\
	subcc	tmp1, 1, tmp1					;\
	bz,pt	%xcc, label/**/1				;\
	 nop							;\
	wrpr	tmp1, %g0, %tl					;\
	set	TSTATE_KERN, tmp2				;\
	wrpr	tmp2, %g0, %tstate				;\
	wrpr	%g0, %g0, %tpc					;\
	wrpr	%g0, %g0, %tnpc					;\
	add	tmp1, 1, tmp1					;\
	wrpr	tmp1, %g0, %tl					;\
label/**/1:


/*
 * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
 * and we reset these registers here.
 */
#define	RESET_CUR_TSTATE(tmp)					\
	set	TSTATE_KERN, tmp				;\
	wrpr	%g0, tmp, %tstate				;\
	wrpr	%g0, 0, %tpc					;\
	wrpr	%g0, 0, %tnpc					;\
	RESET_WINREG(tmp)

/*
 * In case of urgent errors some MMU registers may be
 * corrupted, so we set reasonable values for them here.
 * Note that resetting the MMU registers also resets the context
 * info, so we will need to reset the window registers to prevent
 * spill/fill traps, which depend on the context info for correct
 * behaviour. Note that the TLBs must be flushed before programming
 * the context registers.
 */

#if !defined(lint)
#define	RESET_MMU_REGS(tmp1, tmp2, tmp3)			\
	FLUSH_ALL_TLB(tmp1)					;\
	set	MMU_PCONTEXT, tmp1				;\
	sethi	%hi(kcontextreg), tmp2				;\
	ldx	[tmp2 + %lo(kcontextreg)], tmp2			;\
	stxa	tmp2, [tmp1]ASI_DMMU				;\
	set	MMU_SCONTEXT, tmp1				;\
	stxa	tmp2, [tmp1]ASI_DMMU				;\
	sethi	%hi(ktsb_base), tmp1				;\
	ldx	[tmp1 + %lo(ktsb_base)], tmp2			;\
	mov	MMU_TSB, tmp3					;\
	stxa	tmp2, [tmp3]ASI_IMMU				;\
	stxa	tmp2, [tmp3]ASI_DMMU				;\
	membar	#Sync						;\
	RESET_WINREG(tmp1)

#define	RESET_TSB_TAGPTR(tmp)					\
	set	MMU_TAG_ACCESS, tmp				;\
	stxa	%g0, [tmp]ASI_IMMU				;\
	stxa	%g0, [tmp]ASI_DMMU				;\
	membar	#Sync
#endif /* lint */

/*
 * In case of errors in the MMU_TSB_PREFETCH registers we have to
 * reset them. We can use "0" as the reset value; this way we set
 * the "V" bit of the registers to 0, which will disable the prefetch,
 * so the values of the other fields are irrelevant.
 */
#if !defined(lint)
#define	RESET_TSB_PREFETCH(tmp)			\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH
#endif /* lint */

/*
 * In case of errors in the MMU_SHARED_CONTEXT register we have to
 * reset its value. We can use "0" as the reset value; it will put
 * 0 in the IV field, disabling the shared context support and
 * making the values of all the other fields of the register irrelevant.
 */
#if !defined(lint)
#define	RESET_SHARED_CTXT(tmp)			\
	set	MMU_SHARED_CONTEXT, tmp		;\
	stxa	%g0, [tmp]ASI_DMMU
#endif /* lint */

/*
 * RESET_TO_PRIV()
 *
 * In many cases, we need to force the thread into privileged mode because
 * privileged mode is the only mode in which the system can continue to work,
 * given the indeterminable user mode information that results from register
 * corruption.
 *
 *  - opl_uger_ctxt
 *    If the error is a secondary-TSB-related register parity error, we have
 *    no idea what value is supposed to be in it.
 *
 *  In the three cases below, %tstate is not accessible until it is
 *  overwritten with some value, so we have no clue if the thread was
 *  running in user mode or not:
 *   - opl_uger_pstate
 *     If the error is %pstate parity, it propagates to %tstate.
 *   - opl_uger_tstate
 *     No need to say the reason
 *   - opl_uger_r
 *     If the error is %ccr or %asi parity, it propagates to %tstate
 *
 * For the above four cases, user mode info may not be available for
 * sys_trap() and user_trap() to work consistently. So we have to force
 * the thread into privileged mode.
 *
 * Forcing the thread to privileged mode requires forcing
 * regular %g7 to be CPU_THREAD. Because if it was running in user mode,
 * %g7 will be set in user_trap(). Also since the %sp may be in
 * an inconsistent state, we need to do a stack reset and switch to
 * something we know, i.e. the current thread's kernel stack.
 * We also reset the window registers and MMU registers just to
 * make sure.
 *
 * To set regular %g7, we need to clear the PSTATE_AG bit and need to
 * use one local register. Note that we are panicking and will never
 * unwind back so it is ok to clobber a local.
 *
 * If the thread was running in user mode, the %tpc value itself might be
 * within the range of OBP addresses. %tpc must be forced to be zero to
 * prevent sys_trap() from going to prom_trap().
 *
 */
#define	RESET_TO_PRIV(tmp, tmp1, tmp2, local)			\
	RESET_MMU_REGS(tmp, tmp1, tmp2)				;\
	CPU_ADDR(tmp, tmp1)					;\
	ldx	[tmp + CPU_THREAD], local			;\
	ldx	[local + T_STACK], tmp				;\
	sub	tmp, STACK_BIAS, %sp				;\
	rdpr	%pstate, tmp					;\
	wrpr	tmp, PSTATE_AG, %pstate				;\
	mov	local, %g7					;\
	rdpr	%pstate, local					;\
	wrpr	local, PSTATE_AG, %pstate			;\
	wrpr	%g0, 1, %tl					;\
	set	TSTATE_KERN, tmp				;\
	rdpr	%cwp, tmp1					;\
	or	tmp, tmp1, tmp					;\
	wrpr	tmp, %g0, %tstate				;\
	wrpr	%g0, %tpc


#if defined(lint)

void
ce_err(void)
{}

#else	/* lint */

/*
 * We normally don't expect CE traps since we disable the
 * 0x63 trap reporting at the start of day. There is a
 * small window before we disable them, so let's check for
 * it. Otherwise, panic.
 */

	.align	128
	ENTRY_NP(ce_err)
	mov	AFSR_ECR, %g1
	ldxa	[%g1]ASI_ECR, %g1
	andcc	%g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0
	bz,pn	%xcc, 1f
	 nop
	retry
1:
	/*
	 * We did disable the 0x63 trap reporting.
	 * This shouldn't happen - panic.
	 */
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err)

#endif	/* lint */


#if defined(lint)

void
ce_err_tl1(void)
{}

#else	/* lint */

/*
 * We don't use trap for CE detection.
 */
	ENTRY_NP(ce_err_tl1)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err_tl1)

#endif	/* lint */


#if defined(lint)

void
async_err(void)
{}

#else	/* lint */

/*
 * async_err is the default handler for IAE/DAE traps.
 * For OPL, we patch in the right handler at start of day.
 * But if an IAE/DAE trap gets generated before the handler
 * is patched, panic.
 */
	ENTRY_NP(async_err)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(async_err)

#endif	/* lint */

#if defined(lint)
void
opl_sync_trap(void)
{}
#else	/* lint */

	.seg	".data"
	.global	opl_clr_freg
	.global opl_cpu0_err_log

	.align	16
opl_clr_freg:
	.word	0
	.align	16

	.align	MMU_PAGESIZE
opl_cpu0_err_log:
	.skip	MMU_PAGESIZE

/*
 * Common synchronous error trap handler (tt=0xA, 0x32)
 * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 0. Do TRAPTRACE if enabled.
 * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
 * 2. The SFSR register is read and verified as valid by checking
 *    that the SFSR.FV bit is set. If SFSR.FV is not set, the
 *    error cases cannot be decoded/determined and the SFPAR
 *    register that contains the physical faultaddr is also
 *    not valid. Also, the SFPAR is only valid for UE/TO/BERR error
 *    cases. Assuming the SFSR.FV is valid:
 *    - BERR(bus error)/TO(timeout)/UE case
 *      If any of these error cases are detected, read the SFPAR
 *      to get the faultaddress. Generate ereport.
 *    - TLB Parity case (only recoverable case)
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *	use %tpc for faultaddress (SFAR is not valid in IAE)
 *	Flush all the tlbs.
 *	Subtract one from the recoverable error count stored in
 *	the error log scratch register. If the threshold limit
 *	is reached (zero) - generate ereport. Else
 *	restore globals and retry (no ereport is generated).
 *    - TLB Multiple hits
 *	For DAE, read SFAR for the faultaddress. For IAE,
 *	use %tpc for faultaddress (SFAR is not valid in IAE).
 *	Flush all tlbs and generate ereport.
 * 3. TL=0 and TL>0 considerations
 *    - Since both TL=0 & TL>1 traps are made to vector into
 *      the same handler, the underlying assumption/design here is
 *      that any nested error condition (if it happens) occurs only
 *	in the handler and the system is assumed to eventually enter
 *      Red mode. With this philosophy in mind, the recoverable
 *      TLB Parity error case never checks the TL level before it
 *      retries. Note that this is ok for the TL>1 case (assuming we
 *	don't have a nested error) since we always save the globals
 *      %g1, %g2 & %g3 whenever we enter this trap handler.
 *    - Additional TL=0 vs TL>1 handling includes:
 *      - For a UE error occurring under TL>1, special handling
 *        is added to prevent the unlikely chance of a cpu-lockup
 *        when a UE was originally detected in the user stack and
 *        the spill trap handler taken from sys_trap() so happened
 *        to reference the same UE location. Under the above
 *        condition (TL>1 and UE error), paranoid code is added
 *        to reset window regs so that spill traps can't happen
 *        during the unwind back to TL=0 handling.
 *        Note that we can do that because we are not returning
 *	  back.
 * 4. Ereport generation.
 *    - Ereport generation is performed when we unwind to the TL=0
 *      handling code via sys_trap(). on_trap()/lofault protection
 *      will apply there.
 *
 */
	ENTRY_NP(opl_sync_trap)
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_sync_trap_lb)
	rdpr	%tt, %g1
#endif	/* TRAPTRACE */
	cmp	%g1, T_INSTR_ERROR
	bne,pt	%xcc, 0f
	 mov	MMU_SFSR, %g3
	ldxa	[%g3]ASI_IMMU, %g1	! IAE trap case tt = 0xa
	andcc	%g1, SFSR_FV, %g0
	bz,a,pn %xcc, 2f		! Branch if SFSR is invalid and
	 rdpr	%tpc, %g2		! use %tpc for faultaddr instead

	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check for UE/BERR/TO errors
	bz,a,pt %xcc, 1f		! Branch if not UE/BERR/TO and
	 rdpr	%tpc, %g2		! use %tpc as faultaddr
	set	OPL_MMU_SFPAR, %g3	! In the UE/BERR/TO cases, use
	ba,pt	%xcc, 2f		! SFPAR as faultaddr
	 ldxa	[%g3]ASI_IMMU, %g2
0:
	ldxa	[%g3]ASI_DMMU, %g1	! DAE trap case tt = 0x32
	andcc	%g1, SFSR_FV, %g0
	bnz,pt	%xcc, 7f		! branch if SFSR.FV is valid
	 mov	MMU_SFAR, %g2		! set %g2 to use SFAR
	ba,pt	%xcc, 2f		! SFSR.FV is not valid, read SFAR
	 ldxa	[%g2]ASI_DMMU, %g2	! for faultaddr
7:
	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check UE/BERR/TO for valid SFPAR
	movnz	%xcc, OPL_MMU_SFPAR, %g2 ! Use SFPAR instead of SFAR for
	ldxa	[%g2]ASI_DMMU, %g2	! faultaddr
1:
	sethi	%hi(SFSR_TLB_PRT), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 8f		! branch for TLB multi-hit check
	 nop
	/*
	 * This is the TLB parity error case and it is the
	 * only retryable error case.
	 * Only %g1, %g2 and %g3 are allowed
	 */
	FLUSH_ALL_TLB(%g3)
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	! Read errlog scratchreg
	and	%g3, ERRLOG_REG_NUMERR_MASK, %g3 ! Extract the error count
	subcc	%g3, 1, %g0			! Subtract one from the count
	bz,pn	%xcc, 2f		! too many TLB parity errs in a certain
	 nop				! period, branch to generate ereport
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2
	sub	%g2, 1, %g2		! decrement error counter by 1
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! update the errlog scratchreg
	OPL_RESTORE_GLOBAL(%g1, %g2, %g3)
	retry
8:
	sethi	%hi(SFSR_TLB_MUL), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 2f		! check for the TLB multi-hit errors
	 nop
	FLUSH_ALL_TLB(%g3)
2:
	/*
	 * non-retryable error handling
	 * now we can use other registers since
	 * we will not be returning back
	 */
	mov	%g1, %g5		! %g5 = SFSR
	mov	%g2, %g6		! %g6 = SFPAR or SFAR/tpc
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log

	/*
	 * Special case for UE on user stack.
	 * There is a possibility that the same error may come back here
	 * by touching the same UE in the spill trap handler taken from
	 * sys_trap(). It ends up with an infinite loop causing a cpu lockup.
	 * Conditions for handling this case are:
	 * - SFSR_FV is valid and SFSR_UE is set
	 * - we are at TL > 1
	 * If the above conditions are true, we force %cansave to be a
	 * big number to prevent spill trap in sys_trap(). Note that
	 * we will not be returning back.
	 */
	rdpr	%tt, %g4		! %g4 == ttype
	rdpr	%tl, %g1		! %g1 == tl
	cmp	%g1, 1			! Check if TL == 1
	be,pt	%xcc, 3f		! branch if we came from TL=0
	 nop
	andcc	%g5, SFSR_FV, %g0	! see if SFSR.FV is valid
	bz,pn	%xcc, 4f		! branch, checking UE is meaningless
	sethi	%hi(SFSR_UE), %g2
	andcc	%g5, %g2, %g0		! check for UE
	bz,pt	%xcc, 4f		! branch if not UE
	 nop
	RESET_WINREG(%g1)		! reset windows to prevent spills
4:
	RESET_USER_RTT_REGS(%g2, %g3, opl_sync_trap_resetskip)
opl_sync_trap_resetskip:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl1_error, %g1
	set	opl_cpu_dsync_tl1_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
	ba,pt	%icc, 6f
	nop
3:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl0_error, %g1
	set	opl_cpu_dsync_tl0_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
6:
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	 mov	PIL_15, %g4
	SET_SIZE(opl_sync_trap)
#endif	/* lint */

#if defined(lint)
void
opl_uger_trap(void)
{}
#else	/* lint */
/*
 * Common Urgent error trap handler (tt=0x40)
 * All TL=0 and TL>0 0x40 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 1. Read the Urgent error status register (UGERSR)
 *    Faultaddress is N/A here and it is not collected.
 * 2. Check to see if we have a multiple errors case
 *    If so, we enable the WEAK_ED (weak error detection) bit
 *    to prevent any potential error storms and branch directly
 *    to generate ereport. (we don't decode/handle individual
 *    error cases when we get a multiple error situation)
 * 3. Now look for the recoverable error cases which include
 *    IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
 *    recoverable errors are detected, do the following:
 *    - Flush all tlbs.
 *    - Verify that we came from TL=0, if not, generate
 *      ereport. Note that the reason we don't recover
 *      at TL>0 is because the AGs might be corrupted or
 *      inconsistent. We can't save/restore them into
 *      the scratchpad regs like we did for opl_sync_trap().
 *    - Check the INSTEND[5:4] bits in the UGERSR. If the
 *      value is 0x3 (11b), this error is not recoverable.
 *      Generate ereport.
 *    - Subtract one from the recoverable error count stored in
 *      the error log scratch register. If the threshold limit
 *      is reached (zero) - generate ereport.
 *    - If the count is within the limit, update the count
 *      in the error log register (subtract one). Log the error
 *      info in the log buffer. Capture traptrace if enabled.
 *      Retry (no ereport generated)
 * 4. The rest of the error cases are unrecoverable and will
 *    be handled accordingly (flushing regs, etc. as required).
 *    For details on these error cases (UGER_CRE, UGER_CTXT, etc..)
 *    consult the OPL cpu/mem philosophy doc.
 *    Ereport will be generated for these errors.
 * 5. Ereport generation.
 *    - Ereport generation for an urgent error trap always
 *      results in a panic when we unwind to the TL=0 handling
 *      code via sys_trap(). on_trap()/lofault protection does
 *      not apply there.
 */
	ENTRY_NP(opl_uger_trap)
	set	ASI_UGERSR, %g2
	ldxa	[%g2]ASI_AFSR, %g1		! Read the UGERSR reg

	set	UGESR_MULTI, %g2
	andcc	%g1, %g2, %g0			! Check for Multi-errs
	bz,pt	%xcc, opl_uger_is_recover	! branch if not Multi-errs
	 nop
	set	AFSR_ECR, %g2
	ldxa	[%g2]ASI_AFSR, %g3		! Enable Weak error
	or	%g3, ASI_ECR_WEAK_ED, %g3	! detect mode to prevent
	stxa	%g3, [%g2]ASI_AFSR		! potential error storms
	ba	%xcc, opl_uger_panic1
	 nop

opl_uger_is_recover:
	set	UGESR_CAN_RECOVER, %g2		! Check for recoverable
	andcc	%g1, %g2, %g0			! errors i.e.IUG_DTLB,
	bz,pt	%xcc, opl_uger_cre		! IUG_ITLB or COREERR
	 nop

	/*
	 * Fall thru to handle recoverable case
	 * Need to do the following additional checks to determine
	 * if this is indeed recoverable (see the sketch below):
	 * 1. Error trap came from TL=0 and
	 * 2. INSTEND[5:4] bits in UGERSR are not 0x3
	 * 3. Recoverable error count limit not reached
	 *
	 */
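	/*
	 * In C terms, a sketch of the recoverability test coded below:
	 *
	 *	recoverable = (tl == 1) &&
	 *	    (((ugersr >> 4) & 3) != 3) &&	-- INSTEND[5:4]
	 *	    (--numerr != 0);			-- errlog counter
	 */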
	FLUSH_ALL_TLB(%g3)
	rdpr	%tl, %g3		! Read TL
	cmp	%g3, 1			! Check if we came from TL=0
	bne,pt	%xcc, opl_uger_panic	! branch if came from TL>0
	 nop
	srlx	%g1, 4, %g2		! shift INSTEND[5:4] -> [1:0]
	and	%g2, 3, %g2		! extract the shifted [1:0] bits
	cmp	%g2, 3			! check if INSTEND is recoverable
	be,pt	%xcc, opl_uger_panic	! panic if ([1:0] = 11b)
	 nop
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2	! Read errlog scratch reg
	and	%g2, ERRLOG_REG_NUMERR_MASK, %g3 ! Extract error count and
	subcc	%g3, 1, %g3			! subtract one from it
	bz,pt	%xcc, opl_uger_panic	! If count reached zero, too many
	 nop				! errors, branch to generate ereport
	sub	%g2, 1, %g2			! Subtract one from the count
	set	OPL_SCRATCHPAD_ERRLOG, %g3	! and write back the updated
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! count into the errlog reg
	LOG_UGER_REG(%g1, %g2, %g3)		! Log the error info
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_uger_trap_lb)
#endif	/* TRAPTRACE */
	retry					! retry - no ereport

	/*
	 * Process the rest of the unrecoverable error cases
	 * All error cases below ultimately branch to either
	 * opl_uger_panic or opl_uger_panic1.
	 * opl_uger_panic1 is the same as opl_uger_panic except
	 * for the additional execution of the RESET_TO_PRIV()
	 * macro that does a heavy handed reset. Read the
	 * comments for RESET_TO_PRIV() macro for more info.
	 */
opl_uger_cre:
	set	UGESR_IAUG_CRE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_ctxt
	 nop
	IAG_CRE(%g2, %g3)
	set	AFSR_ECR, %g2
	ldxa	[%g2]ASI_AFSR, %g3
	or	%g3, ASI_ECR_WEAK_ED, %g3
	stxa	%g3, [%g2]ASI_AFSR
	ba	%xcc, opl_uger_panic
	 nop

opl_uger_ctxt:
	set	UGESR_IAUG_TSBCTXT, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_tsbp
	 nop
	GET_CPU_IMPL(%g2)
	cmp	%g2, JUPITER_IMPL
	bne	%xcc, 1f
	  nop
	RESET_SHARED_CTXT(%g2)
1:
	RESET_MMU_REGS(%g2, %g3, %g4)
	ba	%xcc, opl_uger_panic
	 nop

opl_uger_tsbp:
	set	UGESR_IUG_TSBP, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_pstate
	 nop
	GET_CPU_IMPL(%g2)
	cmp	%g2, JUPITER_IMPL
	bne	%xcc, 1f
	  nop
	RESET_TSB_PREFETCH(%g2)
1:
	RESET_TSB_TAGPTR(%g2)

	/*
	 * IUG_TSBP error may corrupt MMU registers
	 * Reset them here.
	 */
	RESET_MMU_REGS(%g2, %g3, %g4)
	ba	%xcc, opl_uger_panic
	 nop

opl_uger_pstate:
	set	UGESR_IUG_PSTATE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_tstate
	 nop
	RESET_CUR_TSTATE(%g2)
	ba	%xcc, opl_uger_panic1
	 nop

opl_uger_tstate:
	set	UGESR_IUG_TSTATE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_f
	 nop
	RESET_PREV_TSTATE(%g2, %g3, opl_uger_tstate_1)
	ba	%xcc, opl_uger_panic1
	 nop

opl_uger_f:
	set	UGESR_IUG_F, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_r
	 nop
	CLEAR_FPREGS(%g2)
	ba	%xcc, opl_uger_panic
	 nop

opl_uger_r:
	set	UGESR_IUG_R, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_panic1
	 nop
	CLEAR_GEN_REGS(%g2, opl_uger_r_1)
	ba	%xcc, opl_uger_panic1
	 nop

opl_uger_panic:
	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	ba	%xcc, opl_uger_panic_cmn
	 nop

opl_uger_panic1:
	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	RESET_TO_PRIV(%g1, %g3, %g4, %l0)

	/*
	 * Set up the argument for sys_trap.
	 * %g2 = arg #1 already set above
	 */
opl_uger_panic_cmn:
	RESET_USER_RTT_REGS(%g4, %g5, opl_uger_panic_resetskip)
opl_uger_panic_resetskip:
	rdpr	%tl, %g3			! arg #2
	set	opl_cpu_urgent_error, %g1	! pc
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	 mov	PIL_15, %g4
	SET_SIZE(opl_uger_trap)
#endif	/* lint */

#if defined(lint)
void
opl_ta3_trap(void)
{}
void
opl_cleanw_subr(void)
{}
#else	/* lint */
/*
 * OPL ta3 support (note that the win_reg save
 * area size for each cpu is 2^7 bytes)
 */
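
/*
 * Layout sketch of one per-cpu opl_ta3_save slot, as used by the
 * SAVE_WREGS/RESTORE_WREGS macros below (16 x 8 bytes = 2^7):
 *
 *	uint64_t l[8];		-- %l0..%l7 at offsets 0..56
 *	uint64_t i[8];		-- %i0..%i7 at offsets 64..120
 */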

#define	RESTORE_WREGS(tmp1, tmp2)		\
	CPU_INDEX(tmp1, tmp2)			;\
	sethi	%hi(opl_ta3_save), tmp2		;\
	ldx	[tmp2 + %lo(opl_ta3_save)], tmp2 ;\
	sllx	tmp1, 7, tmp1			;\
	add	tmp2, tmp1, tmp2		;\
	ldx	[tmp2 + 0], %l0			;\
	ldx	[tmp2 + 8], %l1			;\
	ldx	[tmp2 + 16], %l2		;\
	ldx	[tmp2 + 24], %l3		;\
	ldx	[tmp2 + 32], %l4		;\
	ldx	[tmp2 + 40], %l5		;\
	ldx	[tmp2 + 48], %l6		;\
	ldx	[tmp2 + 56], %l7		;\
	ldx	[tmp2 + 64], %i0		;\
	ldx	[tmp2 + 72], %i1		;\
	ldx	[tmp2 + 80], %i2		;\
	ldx	[tmp2 + 88], %i3		;\
	ldx	[tmp2 + 96], %i4		;\
	ldx	[tmp2 + 104], %i5		;\
	ldx	[tmp2 + 112], %i6		;\
	ldx	[tmp2 + 120], %i7

#define	SAVE_WREGS(tmp1, tmp2)			\
	CPU_INDEX(tmp1, tmp2)			;\
	sethi	%hi(opl_ta3_save), tmp2		;\
	ldx	[tmp2 + %lo(opl_ta3_save)], tmp2 ;\
	sllx	tmp1, 7, tmp1			;\
	add	tmp2, tmp1, tmp2		;\
	stx	%l0, [tmp2 + 0]			;\
	stx	%l1, [tmp2 + 8]			;\
	stx	%l2, [tmp2 + 16]		;\
	stx	%l3, [tmp2 + 24]		;\
	stx	%l4, [tmp2 + 32]		;\
	stx	%l5, [tmp2 + 40]		;\
	stx	%l6, [tmp2 + 48]		;\
	stx	%l7, [tmp2 + 56]		;\
	stx	%i0, [tmp2 + 64]		;\
	stx	%i1, [tmp2 + 72]		;\
	stx	%i2, [tmp2 + 80]		;\
	stx	%i3, [tmp2 + 88]		;\
	stx	%i4, [tmp2 + 96]		;\
	stx	%i5, [tmp2 + 104]		;\
	stx	%i6, [tmp2 + 112]		;\
	stx	%i7, [tmp2 + 120]


/*
 * The purpose of this function is to make sure that the restore
 * instruction after the flushw does not cause a fill trap. The sun4u
 * fill trap handler cannot handle a tlb fault of an unmapped stack
 * except at the restore instruction at user_rtt. On OPL systems the
 * stack can get unmapped between the flushw and restore instructions
 * since multiple strands share the tlb.
 */
	ENTRY_NP(opl_ta3_trap)
	set	trap, %g1
	mov	T_FLUSHW, %g3
	sub	%g0, 1, %g4
	rdpr	%cwp, %g5
	SAVE_WREGS(%g2, %g6)
	save
	flushw
	rdpr	%cwp, %g6
	wrpr	%g5, %cwp
	RESTORE_WREGS(%g2, %g5)
	wrpr	%g6, %cwp
	restored
	restore

	ba,a	fast_trap_done
	SET_SIZE(opl_ta3_trap)

	ENTRY_NP(opl_cleanw_subr)
	set	trap, %g1
	mov	T_FLUSHW, %g3
	sub	%g0, 1, %g4
	rdpr	%cwp, %g5
	SAVE_WREGS(%g2, %g6)
	save
	flushw
	rdpr	%cwp, %g6
	wrpr	%g5, %cwp
	RESTORE_WREGS(%g2, %g5)
	wrpr	%g6, %cwp
	restored
	restore
	jmp	%g7
	  nop
	SET_SIZE(opl_cleanw_subr)
#endif	/* lint */

#if defined(lint)

void
opl_serr_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x0a, and tt=0x32
 */
	ENTRY_NP(opl_serr_instr)
	OPL_SAVE_GLOBAL(%g1,%g2,%g3)
	sethi	%hi(opl_sync_trap), %g3
	jmp	%g3 + %lo(opl_sync_trap)
	 rdpr	%tt, %g1
	.align	32
	SET_SIZE(opl_serr_instr)

#endif	/* lint */

#if defined(lint)

void
opl_ugerr_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x40
 */
	ENTRY_NP(opl_ugerr_instr)
	sethi	%hi(opl_uger_trap), %g3
	jmp	%g3 + %lo(opl_uger_trap)
	 nop
	.align	32
	SET_SIZE(opl_ugerr_instr)

#endif	/* lint */

#if defined(lint)

void
opl_ta3_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x103 (flushw)
 */
	ENTRY_NP(opl_ta3_instr)
	sethi	%hi(opl_ta3_trap), %g3
	jmp	%g3 + %lo(opl_ta3_trap)
	 nop
	.align	32
	SET_SIZE(opl_ta3_instr)

#endif	/* lint */

#if defined(lint)

void
opl_ta4_instr(void)
{}

#else	/* lint */
/*
 * The patch for the .clean_windows code
 */
	ENTRY_NP(opl_ta4_instr)
	sethi	%hi(opl_cleanw_subr), %g3
	add	%g3, %lo(opl_cleanw_subr), %g3
	jmpl	%g3, %g7
	  add	%g7, 8, %g7
	nop
	nop
	nop
	SET_SIZE(opl_ta4_instr)

#endif	/* lint */

#if defined(lint)
/*
 *  Get timestamp (stick).
 */
/* ARGSUSED */
void
stick_timestamp(int64_t *ts)
{
}

#else	/* lint */

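/*
 * STICK<63> is the NPT bit; the sllx/srlx pair below clears it.
 * In C terms (a sketch): *ts = stick & ~(1ULL << 63);
 */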
	ENTRY_NP(stick_timestamp)
	rd	STICK, %g1	! read stick reg
	sllx	%g1, 1, %g1
	srlx	%g1, 1, %g1	! clear npt bit

	retl
	stx	%g1, [%o0]	! store the timestamp
	SET_SIZE(stick_timestamp)

#endif	/* lint */


#if defined(lint)
/*
 * Set STICK adjusted by skew.
 */
/* ARGSUSED */
void
stick_adj(int64_t skew)
{
}

#else	/* lint */

	ENTRY_NP(stick_adj)
	rdpr	%pstate, %g1		! save processor state
	andn	%g1, PSTATE_IE, %g3
	ba	1f			! cache align stick adj
	wrpr	%g0, %g3, %pstate	! turn off interrupts

	.align	16
1:	nop

	rd	STICK, %g4		! read stick reg
	add	%g4, %o0, %o1		! adjust stick with skew
	wr	%o1, %g0, STICK		! write stick reg

	retl
	wrpr	%g1, %pstate		! restore processor state
	SET_SIZE(stick_adj)

#endif	/* lint */

#if defined(lint)
/*
 * Debugger-specific stick retrieval
 */
/*ARGSUSED*/
int
kdi_get_stick(uint64_t *stickp)
{
	return (0);
}

#else	/* lint */

	ENTRY_NP(kdi_get_stick)
	rd	STICK, %g1
	stx	%g1, [%o0]
	retl
	mov	%g0, %o0
	SET_SIZE(kdi_get_stick)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
{ return (0); }

#else

	ENTRY(dtrace_blksuword32)
	save	%sp, -SA(MINFRAME + 4), %sp

	rdpr	%pstate, %l1
	andn	%l1, PSTATE_IE, %l2		! disable interrupts to
	wrpr	%g0, %l2, %pstate		! protect our FPU diddling

	rd	%fprs, %l0
	andcc	%l0, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f			! if the fpu is disabled
	wr	%g0, FPRS_FEF, %fprs		! ... enable the fpu

	st	%f0, [%fp + STACK_BIAS - 4]	! save %f0 to the stack
1:
	set	0f, %l5
	/*
	 * We're about to write a block full of either total garbage
	 * (not kernel data, don't worry) or user floating-point data
	 * (so it only _looks_ like garbage).
	 */
	ld	[%i1], %f0			! modify the block
	membar	#Sync
	stn	%l5, [THREAD_REG + T_LOFAULT]	! set up the lofault handler
	stda	%d0, [%i0]ASI_BLK_COMMIT_S	! store the modified block
	membar	#Sync
	flush	%i0				! flush instruction pipeline
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	ret
	restore	%g0, %g0, %o0

0:
	membar	#Sync
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	/*
	 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
	 * which deals with watchpoints. Otherwise, just return -1.
	 */
	brnz,pt	%i2, 1f
	nop
	ret
	restore	%g0, -1, %o0
1:
	call	dtrace_blksuword32_err
	restore

	SET_SIZE(dtrace_blksuword32)
#endif /* lint */

#if defined(lint)
/*ARGSUSED*/
void
ras_cntr_reset(void *arg)
{
}
#else
	ENTRY_NP(ras_cntr_reset)
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	ldxa	[%o1]ASI_SCRATCHPAD, %o0
	or	%o0, ERRLOG_REG_NUMERR_MASK, %o0
	retl
	 stxa	%o0, [%o1]ASI_SCRATCHPAD
	SET_SIZE(ras_cntr_reset)
#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
opl_error_setup(uint64_t cpu_err_log_pa)
{
}

#else	/* lint */
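/*
 * Layout sketch of the errlog scratchpad value built below (a reading
 * of the code, not an architectural definition):
 *
 *	errlog = ((eidr & ERRLOG_REG_EIDR_MASK) << ERRLOG_REG_EIDR_SHIFT) |
 *	    cpu_err_log_pa | ERRLOG_REG_NUMERR_MASK;
 *
 * i.e. the log buffer PA tagged with this CPU's EIDR, with the
 * recoverable-error counter preset to its maximum.
 */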
	ENTRY_NP(opl_error_setup)
	/*
	 * Initialize the error log scratchpad register
	 */
	ldxa	[%g0]ASI_EIDR, %o2
	sethi	%hi(ERRLOG_REG_EIDR_MASK), %o1
	or	%o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
	and	%o2, %o1, %o3
	sllx	%o3, ERRLOG_REG_EIDR_SHIFT, %o2
	or	%o2, %o0, %o3
	or	%o3, ERRLOG_REG_NUMERR_MASK, %o0
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	stxa	%o0, [%o1]ASI_SCRATCHPAD
	/*
	 * Disable all restrainable error traps
	 */
	mov	AFSR_ECR, %o1
	ldxa	[%o1]ASI_AFSR, %o0
	andn	%o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
	retl
	  stxa	%o0, [%o1]ASI_AFSR
	SET_SIZE(opl_error_setup)
#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
opl_mpg_enable(void)
{
}
#else	/* lint */
	ENTRY_NP(opl_mpg_enable)
	/*
	 * Enable MMU translating multiple page sizes for
	 * sITLB and sDTLB.
	 */
	mov	LSU_MCNTL, %o0
	ldxa	[%o0] ASI_MCNTL, %o1
	or	%o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
	retl
	  stxa	%o1, [%o0] ASI_MCNTL
	SET_SIZE(opl_mpg_enable)
#endif /* lint */

#if	defined(lint)
/*
 * This function is called for each (enabled) CPU. We use it to
 * initialize error handling related registers.
 */
/*ARGSUSED*/
void
cpu_feature_init(void)
{}
#else	/* lint */
	ENTRY(cpu_feature_init)
	!
	! get the device_id and store the device_id
	! in the appropriate cpunodes structure
	! given the cpu's index
	!
	CPU_INDEX(%o0, %o1)
	mulx	%o0, CPU_NODE_SIZE, %o0
	set	cpunodes + DEVICE_ID, %o1
	ldxa	[%g0] ASI_DEVICE_SERIAL_ID, %o2
	stx	%o2, [%o0 + %o1]
	!
	! initialize CPU registers
	!
	ba	opl_cpu_reg_init
	nop
	SET_SIZE(cpu_feature_init)
#endif	/* lint */

#if defined(lint)

void
cpu_clearticknpt(void)
{}

#else	/* lint */
	/*
	 * Clear the NPT (non-privileged trap) bit in the %tick/%stick
	 * registers. In an effort to make the change in the
	 * tick/stick counter as consistent as possible, we disable
	 * all interrupts while we're changing the registers. We also
	 * ensure that the read and write instructions are in the same
	 * line in the instruction cache.
	 */
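	/*
	 * Note: wr/wrpr write (rs1 ^ rs2) to the destination register,
	 * so writing with the NPT mask (1 << 63) as one operand XORs
	 * bit 63 to zero below, leaving the counter bits intact.
	 */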
	ENTRY_NP(cpu_clearticknpt)
	rdpr	%pstate, %g1		/* save processor state */
	andn	%g1, PSTATE_IE, %g3	/* turn off */
	wrpr	%g0, %g3, %pstate	/*   interrupts */
	rdpr	%tick, %g2		/* get tick register */
	brgez,pn %g2, 1f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/*   for NPT bit */
	ba,a,pt	%xcc, 2f
	.align	8			/* Ensure rd/wr in same i$ line */
2:
	rdpr	%tick, %g2		/* get tick register */
	wrpr	%g3, %g2, %tick		/* write tick register, */
					/*   clearing NPT bit   */
1:
	rd	STICK, %g2		/* get stick register */
	brgez,pn %g2, 3f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/*   for NPT bit */
	ba,a,pt	%xcc, 4f
	.align	8			/* Ensure rd/wr in same i$ line */
4:
	rd	STICK, %g2		/* get stick register */
	wr	%g3, %g2, STICK		/* write stick register, */
					/*   clearing NPT bit   */
3:
	jmp	%g4 + 4
	wrpr	%g0, %g1, %pstate	/* restore processor state */

	SET_SIZE(cpu_clearticknpt)

#endif	/* lint */

#if defined(lint)

void
cpu_halt_cpu(void)
{}

void
cpu_smt_pause(void)
{}

#else	/* lint */

	/*
	 * Halt the current strand with the suspend instruction.
	 * The compiler/asm currently does not support this suspend
	 * instruction mnemonic, so use the byte code for now.
	 */
	ENTRY_NP(cpu_halt_cpu)
	.word	0x81b01040
	retl
	nop
	SET_SIZE(cpu_halt_cpu)

	/*
	 * Pause the current strand with the sleep instruction.
	 * The compiler/asm currently does not support this sleep
	 * instruction mnemonic, so use the byte code for now.
	 */
	ENTRY_NP(cpu_smt_pause)
	.word	0x81b01060
	retl
	nop
	SET_SIZE(cpu_smt_pause)

#endif	/* lint */