/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Cheetah+ module
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/asm_linkage.h>
#include <sys/trap.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cheetahasm.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if !defined(lint)

/* BEGIN CSTYLED */

/*
 * Cheetah+ version to reflush an Ecache line by index.
 *
 * By default we assume the Ecache is 2-way so we flush both
 * ways. Even if the cache is direct-mapped no harm will come
 * from performing the flush twice, apart from perhaps a performance
 * penalty.
 *
 * XXX - scr2 not used.
 */
#define	ECACHE_REFLUSH_LINE(ec_set_size, index, scr2)			\
	ldxa	[index]ASI_EC_DIAG, %g0;				\
	ldxa	[index + ec_set_size]ASI_EC_DIAG, %g0;

/*
 * Cheetah+ version of ecache_flush_line.  Uses Cheetah+ Ecache Displacement
 * Flush feature.
 */
#define	ECACHE_FLUSH_LINE(physaddr, ec_set_size, scr1, scr2)		\
	sub	ec_set_size, 1, scr1;					\
	and	physaddr, scr1, scr1;					\
	set	CHP_ECACHE_IDX_DISP_FLUSH, scr2;			\
	or	scr2, scr1, scr1;					\
	ECACHE_REFLUSH_LINE(ec_set_size, scr1, scr2)

/* END CSTYLED */

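/*
 * For reference, a rough C sketch of the address that ECACHE_FLUSH_LINE
 * builds (illustrative only; the function name below is made up and exists
 * only in this comment):
 *
 *	uint64_t
 *	chp_disp_flush_addr(uint64_t physaddr, uint64_t ec_set_size)
 *	{
 *		uint64_t index = physaddr & (ec_set_size - 1);
 *		return (CHP_ECACHE_IDX_DISP_FLUSH | index);
 *	}
 *
 * ECACHE_REFLUSH_LINE then issues ASI_EC_DIAG loads from that index and
 * from (index + ec_set_size), covering both ways of a 2-way Ecache.
 */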
/*
 * Panther version to reflush a line from both the L2 cache and L3
 * cache by the respective indexes. Flushes all ways of the line from
 * each cache.
 *
 * l2_index	Index into the L2$ of the line to be flushed. This
 *		register will not be modified by this routine.
 * l3_index	Index into the L3$ of the line to be flushed. This
 *		register will not be modified by this routine.
 * scr2		scratch register.
 * scr3		scratch register.
 *
 */
#define	PN_ECACHE_REFLUSH_LINE(l2_index, l3_index, scr2, scr3)		\
	set	PN_L2_MAX_SET, scr2;					\
	set	PN_L2_SET_SIZE, scr3;					\
1:									\
	ldxa	[l2_index + scr2]ASI_L2_TAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	1b;							\
	  sub	scr2, scr3, scr2;					\
	set	PN_L3_MAX_SET, scr2;					\
	set	PN_L3_SET_SIZE, scr3;					\
2:									\
	ldxa	[l3_index + scr2]ASI_EC_DIAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	2b;							\
	  sub	scr2, scr3, scr2;


/*
 * Panther version of ecache_flush_line. Flushes the line corresponding
 * to physaddr from both the L2 cache and the L3 cache.
 *
 * physaddr	Input: Physical address to flush.
 *              Output: Physical address to flush (preserved).
 * l2_idx_out	Input: scratch register.
 *              Output: Index into the L2$ of the line to be flushed.
 * l3_idx_out	Input: scratch register.
 *              Output: Index into the L3$ of the line to be flushed.
 * scr3		scratch register.
 * scr4		scratch register.
 *
 */
#define	PN_ECACHE_FLUSH_LINE(physaddr, l2_idx_out, l3_idx_out, scr3, scr4)	\
	set	PN_L3_SET_SIZE, l2_idx_out;					\
	sub	l2_idx_out, 1, l2_idx_out;					\
	and	physaddr, l2_idx_out, l3_idx_out;				\
	set	PN_L3_IDX_DISP_FLUSH, l2_idx_out;				\
	or	l2_idx_out, l3_idx_out, l3_idx_out;				\
	set	PN_L2_SET_SIZE, l2_idx_out;					\
	sub	l2_idx_out, 1, l2_idx_out;					\
	and	physaddr, l2_idx_out, l2_idx_out;				\
	set	PN_L2_IDX_DISP_FLUSH, scr3;					\
	or	l2_idx_out, scr3, l2_idx_out;					\
	PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)

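/*
 * Again for reference, a rough C sketch of the index computations done by
 * PN_ECACHE_FLUSH_LINE (illustrative only; the function names below are
 * made up and exist only in this comment):
 *
 *	uint64_t
 *	pn_l2_disp_flush_idx(uint64_t physaddr)
 *	{
 *		return (PN_L2_IDX_DISP_FLUSH |
 *		    (physaddr & (PN_L2_SET_SIZE - 1)));
 *	}
 *
 *	uint64_t
 *	pn_l3_disp_flush_idx(uint64_t physaddr)
 *	{
 *		return (PN_L3_IDX_DISP_FLUSH |
 *		    (physaddr & (PN_L3_SET_SIZE - 1)));
 *	}
 *
 * PN_ECACHE_REFLUSH_LINE then walks each index from PN_L2_MAX_SET (resp.
 * PN_L3_MAX_SET) down to 0 in PN_L2_SET_SIZE (resp. PN_L3_SET_SIZE) steps,
 * so every way of the line is displaced from both caches.
 */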
#endif	/* !lint */

/*
 * Fast ECC error at TL>0 handler
 * We get here via trap 70 at TL>0->Software trap 0 at TL>0.  We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 * For a complete description of the Fast ECC at TL>0 handling see the
 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 * us3_common_asm.s
 */
#if defined(lint)

void
fast_ecc_tl1_err(void)
{}

#else	/* lint */

	.section ".text"
	.align	64
	ENTRY_NP(fast_ecc_tl1_err)

	/*
	 * This macro turns off the D$/I$ if they are on and saves their
	 * original state in ch_err_tl1_tmp, saves all the %g registers in the
	 * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
	 * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
	 * point to the ch_err_tl1_data structure and the original D$/I$ state
	 * will be saved in ch_err_tl1_tmp.  All %g registers except for %g1
	 * will be available.
	 */
	CH_ERR_TL1_FECC_ENTER;

	/*
	 * Get the diagnostic logout data.  %g4 must be initialized to
	 * current CEEN state, %g5 must point to logout structure in
	 * ch_err_tl1_data_t.  %g3 will contain the nesting count upon
	 * return.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g4
	and	%g4, EN_REG_CEEN, %g4
	add	%g1, CH_ERR_TL1_LOGOUT, %g5
	DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)

	/*
	 * If the logout nesting count is exceeded, we're probably
	 * not making any progress, try to panic instead.
	 */
	cmp	%g3, CLO_NESTING_MAX
	bge	fecc_tl1_err
	  nop

	/*
	 * Save the current CEEN and NCEEN state in %g7 and turn them off
	 * before flushing the Ecache.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g7
	andn	%g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
	stxa	%g5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * Flush the Ecache, using the largest possible cache size with the
	 * smallest possible line size since we can't get the actual sizes
	 * from the cpu_node due to DTLB misses.
	 */
	PN_L2_FLUSHALL(%g3, %g4, %g5)

	set	CH_ECACHE_MAX_SIZE, %g4
	set	CH_ECACHE_MIN_LSIZE, %g5

	GET_CPU_IMPL(%g6)
	cmp	%g6, PANTHER_IMPL
	bne	%xcc, 2f
	  nop
	set	PN_L3_SIZE, %g4
2:
	mov	%g6, %g3
	CHP_ECACHE_FLUSHALL(%g4, %g5, %g3)

	/*
	 * Restore CEEN and NCEEN to the previous state.
	 */
	stxa	%g7, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * If we turned off the D$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_DC_ON, %g0
	bz	%xcc, 3f
	  nop

	/*
	 * Flush the D$.
	 */
	ASM_LD(%g4, dcache_size)
	ASM_LD(%g5, dcache_linesize)
	CH_DCACHE_FLUSHALL(%g4, %g5, %g6)

	/*
	 * Turn the D$ back on.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_DC, %g3
	stxa	%g3, [%g0]ASI_DCU
	membar	#Sync
3:
	/*
	 * If we turned off the I$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_IC_ON, %g0
	bz	%xcc, 4f
	  nop

	/*
	 * Flush the I$.  Panther has different I$ parameters, and we
	 * can't access the logout I$ params without possibly generating
	 * a MMU miss.
	 */
	GET_CPU_IMPL(%g6)
	set	PN_ICACHE_SIZE, %g3
	set	CH_ICACHE_SIZE, %g4
	mov	CH_ICACHE_LSIZE, %g5
	cmp	%g6, PANTHER_IMPL
	movz	%xcc, %g3, %g4
	movz	%xcc, PN_ICACHE_LSIZE, %g5
	CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)

	/*
	 * Turn the I$ back on.  Changing DCU_IC requires flush.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_IC, %g3
	stxa	%g3, [%g0]ASI_DCU
	flush	%g0
4:

#ifdef TRAPTRACE
	/*
	 * Get current trap trace entry physical pointer.
	 */
	CPU_INDEX(%g6, %g5)
	sll	%g6, TRAPTR_SIZE_SHIFT, %g6
	set	trap_trace_ctl, %g5
	add	%g6, %g5, %g6
	ld	[%g6 + TRAPTR_LIMIT], %g5
	tst	%g5
	be	%icc, skip_traptrace
	  nop
	ldx	[%g6 + TRAPTR_PBASE], %g5
	ld	[%g6 + TRAPTR_OFFSET], %g4
	add	%g5, %g4, %g5

	/*
	 * Create trap trace entry.
	 */
	rd	%asi, %g7
	wr	%g0, TRAPTR_ASI, %asi
	rd	STICK, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g4
	stha	%g4, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g4
	stna	%g4, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi
	stna	%g4, [%g5 + TRAP_ENT_F2]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F3]%asi
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi
	wr	%g0, %g7, %asi

	/*
	 * Advance trap trace pointer.
	 */
	ld	[%g6 + TRAPTR_OFFSET], %g5
	ld	[%g6 + TRAPTR_LIMIT], %g4
	st	%g5, [%g6 + TRAPTR_LAST_OFFSET]
	add	%g5, TRAP_ENT_SIZE, %g5
	sub	%g4, TRAP_ENT_SIZE, %g4
	cmp	%g5, %g4
	movge	%icc, 0, %g5
	st	%g5, [%g6 + TRAPTR_OFFSET]
skip_traptrace:
#endif	/* TRAPTRACE */

	/*
	 * If nesting count is not zero, skip all the AFSR/AFAR
	 * handling and just do the necessary cache-flushing.
	 */
	ldxa	[%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
	brnz	%g2, 6f
	  nop

	/*
	 * If a UCU or L3_UCU followed by a WDU has occurred go ahead
	 * and panic since a UE will occur (on the retry) before the
	 * UCU and WDU messages are enqueued.
	 */
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g3
	set	1, %g4
	sllx	%g4, C_AFSR_UCU_SHIFT, %g4
	btst	%g4, %g3		! UCU in original shadow AFSR?
	bnz	%xcc, 5f
	  mov	1, %g4
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g3
	sllx	%g4, C_AFSR_L3_UCU_SHIFT, %g4
	btst	%g4, %g3		! L3_UCU in original shadow AFSR_EXT?
	bz	%xcc, 6f
	  nop
5:
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4	! original AFSR
	ldxa	[%g0]ASI_AFSR, %g3	! current AFSR
	or	%g3, %g4, %g3		! %g3 = original + current AFSR
	set	1, %g4
	sllx	%g4, C_AFSR_WDU_SHIFT, %g4
	btst	%g4, %g3		! WDU in original or current AFSR?
	bnz	%xcc, fecc_tl1_err
	  nop
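	/*
	 * In rough C terms the check above decides (sketch only, using the
	 * bit-shift constants from the code; the variable names are made up):
	 *
	 *	ucu_seen = ((sdw_afsr & (1ULL << C_AFSR_UCU_SHIFT)) != 0) ||
	 *	    ((sdw_afsr_ext & (1ULL << C_AFSR_L3_UCU_SHIFT)) != 0);
	 *	if (ucu_seen &&
	 *	    ((orig_afsr | cur_afsr) & (1ULL << C_AFSR_WDU_SHIFT)))
	 *		goto fecc_tl1_err;	(i.e. panic)
	 */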

6:
	/*
	 * We fall into this macro if we've successfully logged the error in
	 * the ch_err_tl1_data structure and want the PIL15 softint to pick
	 * it up and log it.  %g1 must point to the ch_err_tl1_data structure.
	 * Restores the %g registers and issues retry.
	 */
	CH_ERR_TL1_EXIT;

	/*
	 * Establish panic exit label.
	 */
	CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);

	SET_SIZE(fast_ecc_tl1_err)

#endif	/* lint */


#if defined(lint)
/*
 * scrubphys - Pass in the aligned physical memory address
 * that you want to scrub, along with the ecache set size.
 *
 *	1) Displacement flush the E$ line corresponding to %addr.
 *	   The first ldxa guarantees that the %addr is no longer in
 *	   M, O, or E (it goes to I, or to S if an instruction fetch
 *	   also happens).
 *	2) "Write" the data using a CAS %addr,%g0,%g0.
 *	   The casxa guarantees a transition from I to M or S to M.
 *	3) Displacement flush the E$ line corresponding to %addr.
 *	   The second ldxa pushes the M line out of the ecache, into the
 *	   writeback buffers, on the way to memory.
 *	4) The "membar #Sync" pushes the cache line out of the writeback
 *	   buffers onto the bus, on the way to dram finally.
 *
 * This is a modified version of the algorithm suggested by Gary Lauterbach.
 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 * as modified, but then we found out that for spitfire, if it misses in the
 * E$ it will probably install as an M, but if it hits in the E$, then it
 * will stay E, if the store doesn't happen. So the first displacement flush
 * should ensure that the CAS will miss in the E$.  Arrgh.
 */
/* ARGSUSED */
void
scrubphys(uint64_t paddr, int ecache_set_size)
{}

#else	/* lint */
	ENTRY(scrubphys)
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	GET_CPU_IMPL(%o5)		! Panther Ecache is flushed differently
	cmp	%o5, PANTHER_IMPL
	bne	scrubphys_1
	  nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	scrubphys_2
	  nop
scrubphys_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
scrubphys_2:
	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value

	retl
	membar	#Sync			! move the data out of the load buffer
	SET_SIZE(scrubphys)
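
/*
 * A loose C-level sketch of the sequence above, for the non-Panther path
 * (illustrative only; disp_flush() stands in for the displacement-flush
 * macros and is not a real function):
 *
 *	pstate = disable_interrupts();		clear PSTATE.IE and PSTATE.AM
 *	disp_flush(paddr);			step 1: line leaves M/O/E
 *	(void) casxa(paddr, 0, 0);		step 2: force the line to M
 *	disp_flush(paddr);			step 3: push the M line out
 *	restore_pstate(pstate);
 *	membar_sync();				step 4: drain to memory
 */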

#endif	/* lint */


#if defined(lint)
/*
 * clearphys - Pass in the aligned physical memory address
 * that you want to push out of the Ecache as an ecache_linesize byte
 * block of zeros; the line is zero-filled before it is flushed.
 */
/* ARGSUSED */
void
clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
{
}

#else	/* lint */
	ENTRY(clearphys)
	/* turn off IE, AM bits */
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate

	/* turn off NCEEN */
	ldxa	[%g0]ASI_ESTATE_ERR, %o5
	andn	%o5, EN_REG_NCEEN, %o3
	stxa	%o3, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* zero the E$ line */
clearphys_1:
	subcc	%o2, 8, %o2
	bge	clearphys_1
	  stxa	%g0, [%o0 + %o2]ASI_MEM

	GET_CPU_IMPL(%o3)		! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	clearphys_2
	  nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %g1)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	clearphys_3
	  nop
clearphys_2:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
clearphys_3:
	/* clear the AFSR */
	ldxa	[%g0]ASI_AFSR, %o1
	stxa	%o1, [%g0]ASI_AFSR
	membar	#Sync

	/* turn NCEEN back on */
	stxa	%o5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* return and re-enable IE and AM */
	retl
	  wrpr	%g0, %o4, %pstate
	SET_SIZE(clearphys)
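
/*
 * The zeroing loop above walks the line from the top down in 8-byte
 * stores, roughly (sketch only; stxa_mem() is a made-up stand-in for the
 * stxa to ASI_MEM):
 *
 *	for (off = ecache_linesize - 8; off >= 0; off -= 8)
 *		stxa_mem(paddr + off, 0);
 */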

#endif	/* lint */


#if defined(lint)
/*
 * Cheetah+ Ecache displacement flush the specified line from the E$
 *
 * For Panther, this means flushing the specified line from both the
 * L2 cache and L3 cache.
 *
 * Register usage:
 *	%o0 - 64 bit physical address for flushing
 *	%o1 - Ecache set size
 */
/*ARGSUSED*/
void
ecache_flush_line(uint64_t flushaddr, int ec_set_size)
{
}
#else	/* lint */
	ENTRY(ecache_flush_line)

	GET_CPU_IMPL(%o3)		! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	ecache_flush_line_1
	  nop

	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
	b	ecache_flush_line_2
	  nop
ecache_flush_line_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
ecache_flush_line_2:
	retl
	  nop
	SET_SIZE(ecache_flush_line)
#endif	/* lint */

#if defined(lint)
void
set_afsr_ext(uint64_t afsr_ext)
{
	afsr_ext = afsr_ext;
}
#else /* lint */

	ENTRY(set_afsr_ext)
	set	ASI_AFSR_EXT_VA, %o1
	stxa	%o0, [%o1]ASI_AFSR		! afsr_ext reg
	membar	#Sync
	retl
	nop
	SET_SIZE(set_afsr_ext)

#endif /* lint */


#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if an ITLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all ITLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the ITLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
void
itlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(itlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g2			! VA that caused the IMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page size
	set	PN_ITLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5
	and	%g4, %g5, %g4
	or	%g4, %g3, %g3			! 'or' in the trap context and
	mov	1, %g4				! add the IMMU flag to complete
	sllx	%g4, PN_TLO_INFO_IMMU_SHIFT, %g4
	or	%g4, %g3, %g3			! the tlo_info field for logout
	stxa	%g0,[MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Next, we calculate the TLB index value for the failing VA.
	 */
	mov	%g2, %g4			! We need the ITLB index
	set	PN_ITLB_PGSZ_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_ITLB_PGSZ_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_ITLB_T512, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID
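	/*
	 * In C-like terms the access index built above is roughly (sketch
	 * only, reusing pn_get_tlb_index() which is defined later in this
	 * file; pgsz is the PN_ITLB_PGSZ field extracted from tlo_info):
	 *
	 *	idx = (pn_get_tlb_index(va, pgsz) << PN_TLB_ACC_IDX_SHIFT) |
	 *	    PN_ITLB_T512;
	 */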

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Check to see if the logout structure is available.
	 */
	set	CHPR_TLB_LOGOUT, %g6
	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, itlb_parity_trap_1)
	set	LOGOUT_INVALID_U32, %g6
	sllx	%g6, 32, %g6			! if our logout structure is
	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
	or	%g5, %g6, %g5			! already being used, then we
	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
	cmp	%g6, %g5			! information before clearing
	bne	itlb_parity_trap_1		! and logging the error.
	  nop

	/*
	 * Record the logout information. %g4 contains our index + TLB ID
	 * for use in ASI_ITLB_ACCESS and ASI_ITLB_TAGREAD. %g1 contains
	 * the pointer to our logout struct.
	 */
	stx	%g3, [%g1 + PN_TLO_INFO]
	stx	%g2, [%g1 + PN_TLO_ADDR]
	stx	%g2, [%g1 + PN_TLO_PC]		! %tpc == fault addr for IMMU

	add	%g1, PN_TLO_ITLB_TTE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4
	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	andn	%g4, %g6, %g4			! back to way 0

itlb_parity_trap_1:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Here we will clear the errors from the TLB.
	 */
	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
	stxa	%g0, [%g5]ASI_IMMU		! 0 as it will be invalid.
	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write the data and tag
	membar	#Sync

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4

	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write same data and tag
	membar	#Sync

	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
	flush   %g6				! flush after writing MMU regs

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
	 * already at PIL 15.
	 */
	set	cpu_tlb_parity_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	movl	%icc, PIL_14, %g4
	ba	sys_trap
	  nop
	SET_SIZE(itlb_parity_trap)

#endif	/* lint */

#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if a DTLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all DTLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the DTLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
void
dtlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(dtlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[MMU_SFAR]%asi, %g2		! VA that caused the DMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page sizes
	set	PN_DTLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5		! 'or' in the trap context
	and	%g4, %g5, %g4			! to complete the tlo_info
	or	%g4, %g3, %g3			! field for logout
	stxa	%g0,[MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Calculate the TLB index values for the failing VA. Since the T512
	 * TLBs can be configured for different page sizes, we need to find
	 * the index into each one separately.
	 */
	mov	%g2, %g4			! First we get the DTLB_0 index
	set	PN_DTLB_PGSZ0_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ0_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the DTLB_0 index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_DTLB_T512_0, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID

	mov	%g2, %g7			! Next we get the DTLB_1 index
	set	PN_DTLB_PGSZ1_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ1_SHIFT, %g5
	PN_GET_TLB_INDEX(%g7, %g5)		! %g7 has the DTLB_1 index
	sllx	%g7, PN_TLB_ACC_IDX_SHIFT, %g7	! shift the index into place
	set	PN_DTLB_T512_1, %g5
	or	%g7, %g5, %g7			! and add in the TLB ID

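	/*
	 * Sketch of the two access indexes built above (illustrative only,
	 * again reusing pn_get_tlb_index(); pgsz0/pgsz1 are the PGSZ0/PGSZ1
	 * fields extracted from tlo_info):
	 *
	 *	idx0 = (pn_get_tlb_index(va, pgsz0) << PN_TLB_ACC_IDX_SHIFT) |
	 *	    PN_DTLB_T512_0;
	 *	idx1 = (pn_get_tlb_index(va, pgsz1) << PN_TLB_ACC_IDX_SHIFT) |
	 *	    PN_DTLB_T512_1;
	 */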
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * If this trap happened at TL>0, then we don't want to mess
	 * with the normal logout struct since that could cause a TLB
	 * miss.
	 */
	rdpr	%tl, %g6			! read current trap level
	cmp	%g6, 1				! skip over the tl>1 code
	ble	dtlb_parity_trap_1		! if TL <= 1.
	  nop

	/*
	 * If we are here, then the trap happened at TL>1. Simply
	 * update our tlo_info field and then skip to the TLB flush
	 * code.
	 */
	mov	1, %g6
	sllx	%g6, PN_TLO_INFO_TL1_SHIFT, %g6
	or	%g6, %g3, %g3
	ba	dtlb_parity_trap_2
	  nop

dtlb_parity_trap_1:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * Check to see if the logout structure is available.
	 */
	set	CHPR_TLB_LOGOUT, %g6
	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, dtlb_parity_trap_2)
	set	LOGOUT_INVALID_U32, %g6
	sllx	%g6, 32, %g6			! if our logout structure is
	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
	or	%g5, %g6, %g5			! already being used, then we
	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
	cmp	%g6, %g5			! information before clearing
	bne	dtlb_parity_trap_2		! and logging the error.
	  nop

	/*
	 * Record the logout information. %g4 contains our DTLB_0
	 * index + TLB ID and %g7 contains our DTLB_1 index + TLB ID
	 * both of which will be used for ASI_DTLB_ACCESS and
	 * ASI_DTLB_TAGREAD. %g1 contains the pointer to our logout
	 * struct.
	 */
	stx	%g3, [%g1 + PN_TLO_INFO]
	stx	%g2, [%g1 + PN_TLO_ADDR]
	rdpr	%tpc, %g5
	stx	%g5, [%g1 + PN_TLO_PC]

	add	%g1, PN_TLO_DTLB_TTE, %g1	! move up the pointer

	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 0 and store it away
	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 0 and store it away

	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 0
	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4			! of each TLB.
	or	%g7, %g6, %g7
	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer

	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 1 and store it away
	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 1 and store it away

	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 1
	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

	andn	%g4, %g6, %g4			! back to way 0
	andn	%g7, %g6, %g7			! back to way 0

dtlb_parity_trap_2:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * Here we will clear the errors from the DTLB.
	 */
	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
	stxa	%g0, [%g5]ASI_DMMU		! 0 as it will be invalid.
	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write the data and tag.
	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 0
	membar	#Sync

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4
	or	%g7, %g6, %g7

	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write same data and tag.
	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 1
	membar	#Sync

	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
	flush   %g6				! flush after writing MMU regs

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
	 * already at PIL 15. We do this even for TL>1 traps since
	 * those will lead to a system panic.
	 */
	set	cpu_tlb_parity_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	movl	%icc, PIL_14, %g4
	ba	sys_trap
	  nop
	SET_SIZE(dtlb_parity_trap)

#endif	/* lint */


#if defined(lint)
/*
 * Calculates the Panther TLB index based on a virtual address and page size
 *
 * Register usage:
 *	%o0 - virtual address whose index we want
 *	%o1 - Page Size of the TLB in question as encoded in the
 *	      ASI_[D|I]MMU_TAG_ACCESS_EXT register.
 */
uint64_t
pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
{
	return ((va + pg_sz)-(va + pg_sz));
}
#else	/* lint */
	ENTRY(pn_get_tlb_index)

	PN_GET_TLB_INDEX(%o0, %o1)

	retl
	  nop
	SET_SIZE(pn_get_tlb_index)
#endif	/* lint */


#if defined(lint)
/*
 * For Panther CPUs we need to flush the IPB after any I$ or D$
 * parity errors are detected.
 */
void
flush_ipb(void)
{ return; }

#else	/* lint */

	ENTRY(flush_ipb)
	clr	%o0

flush_ipb_1:
	stxa	%g0, [%o0]ASI_IPB_TAG
	membar	#Sync
	cmp	%o0, PN_IPB_TAG_ADDR_MAX
	blt	flush_ipb_1
	  add	%o0, PN_IPB_TAG_ADDR_LINESIZE, %o0

	sethi	%hi(FLUSH_ADDR), %o0
	flush   %o0
	retl
	nop
	SET_SIZE(flush_ipb)
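
/*
 * Loop sketch for flush_ipb (illustrative only; stxa_ipb_tag() is a
 * made-up stand-in for the stxa to ASI_IPB_TAG):
 *
 *	for (addr = 0; addr <= PN_IPB_TAG_ADDR_MAX;
 *	    addr += PN_IPB_TAG_ADDR_LINESIZE) {
 *		stxa_ipb_tag(addr, 0);
 *		membar_sync();
 *	}
 */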

#endif	/* lint */
