/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Cheetah+ module
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/trap.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cheetahasm.h>
#include <sys/cmpregs.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */


#if !defined(lint)

/* BEGIN CSTYLED */

/*
 * Cheetah+ version to reflush an Ecache line by index.
 *
 * By default we assume the Ecache is 2-way so we flush both
 * ways. Even if the cache is direct-mapped no harm will come
 * from performing the flush twice, apart from perhaps a performance
 * penalty.
 *
 * XXX - scr2 not used.
 */
#define	ECACHE_REFLUSH_LINE(ec_set_size, index, scr2)			\
	ldxa	[index]ASI_EC_DIAG, %g0;				\
	ldxa	[index + ec_set_size]ASI_EC_DIAG, %g0;

/*
 * Cheetah+ version of ecache_flush_line.  Uses Cheetah+ Ecache Displacement
 * Flush feature.
 */
#define	ECACHE_FLUSH_LINE(physaddr, ec_set_size, scr1, scr2)		\
	sub	ec_set_size, 1, scr1;					\
	and	physaddr, scr1, scr1;					\
	set	CHP_ECACHE_IDX_DISP_FLUSH, scr2;			\
	or	scr2, scr1, scr1;					\
	ECACHE_REFLUSH_LINE(ec_set_size, scr1, scr2)

/* END CSTYLED */

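/*
 * For readers less familiar with the displacement-flush ASI, the two
 * macros above amount to roughly the following C.  This is a sketch
 * kept in a comment for illustration only; ecdiag_load() is a
 * hypothetical stand-in for the "ldxa [...]ASI_EC_DIAG" diagnostic
 * loads, not a real kernel interface.
 *
 *	static void
 *	chp_ecache_flush_line_sketch(uint64_t physaddr, uint64_t ec_set_size)
 *	{
 *		uint64_t index;
 *
 *		// offset within one set, plus the displacement-flush bit
 *		index = (physaddr & (ec_set_size - 1)) |
 *		    CHP_ECACHE_IDX_DISP_FLUSH;
 *		ecdiag_load(index);			// way 0
 *		ecdiag_load(index + ec_set_size);	// way 1
 *	}
 */
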
/*
 * Panther version to reflush a line from both the L2 cache and L3
 * cache by the respective indexes. Flushes all ways of the line from
 * each cache.
 *
 * l2_index	Index into the L2$ of the line to be flushed. This
 *		register will not be modified by this routine.
 * l3_index	Index into the L3$ of the line to be flushed. This
 *		register will not be modified by this routine.
 * scr2		scratch register.
 * scr3		scratch register.
 *
 */
#define	PN_ECACHE_REFLUSH_LINE(l2_index, l3_index, scr2, scr3)		\
	set	PN_L2_MAX_SET, scr2;					\
	set	PN_L2_SET_SIZE, scr3;					\
1:									\
	ldxa	[l2_index + scr2]ASI_L2_TAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	1b;							\
	  sub	scr2, scr3, scr2;					\
	mov	6, scr2;						\
7:									\
	cmp	scr2, %g0;						\
	bg,a	7b;							\
	  sub	scr2, 1, scr2;						\
	set	PN_L3_MAX_SET, scr2;					\
	set	PN_L3_SET_SIZE, scr3;					\
2:									\
	ldxa	[l3_index + scr2]ASI_EC_DIAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	2b;							\
	  sub	scr2, scr3, scr2;

/*
 * Panther version of ecache_flush_line. Flushes the line corresponding
 * to physaddr from both the L2 cache and the L3 cache.
 *
 * physaddr	Input: Physical address to flush.
 *              Output: Physical address to flush (preserved).
 * l2_idx_out	Input: scratch register.
 *              Output: Index into the L2$ of the line to be flushed.
 * l3_idx_out	Input: scratch register.
 *              Output: Index into the L3$ of the line to be flushed.
 * scr3		scratch register.
 * scr4		scratch register.
 *
 */
#define	PN_ECACHE_FLUSH_LINE(physaddr, l2_idx_out, l3_idx_out, scr3, scr4)	\
	set	PN_L3_SET_SIZE, l2_idx_out;					\
	sub	l2_idx_out, 1, l2_idx_out;					\
	and	physaddr, l2_idx_out, l3_idx_out;				\
	set	PN_L3_IDX_DISP_FLUSH, l2_idx_out;				\
	or	l2_idx_out, l3_idx_out, l3_idx_out;				\
	set	PN_L2_SET_SIZE, l2_idx_out;					\
	sub	l2_idx_out, 1, l2_idx_out;					\
	and	physaddr, l2_idx_out, l2_idx_out;				\
	set	PN_L2_IDX_DISP_FLUSH, scr3;					\
	or	l2_idx_out, scr3, l2_idx_out;					\
	PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)
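
/*
 * The Panther macros above can be pictured in C roughly as follows.
 * This is an illustrative sketch only (it is a comment and is not
 * built); l2diag_load() and l3diag_load() are hypothetical stand-ins
 * for the ASI_L2_TAG and ASI_EC_DIAG diagnostic loads, and the short
 * counting loop mirrors the "mov 6, scr2" spin between the two passes.
 *
 *	static void
 *	pn_ecache_flush_line_sketch(uint64_t physaddr)
 *	{
 *		uint64_t l2_index, l3_index;
 *		long i;
 *
 *		l3_index = (physaddr & (PN_L3_SET_SIZE - 1)) |
 *		    PN_L3_IDX_DISP_FLUSH;
 *		l2_index = (physaddr & (PN_L2_SET_SIZE - 1)) |
 *		    PN_L2_IDX_DISP_FLUSH;
 *
 *		// flush every way of the L2 line
 *		for (i = PN_L2_MAX_SET; i >= 0; i -= PN_L2_SET_SIZE)
 *			l2diag_load(l2_index + i);
 *		for (i = 6; i > 0; i--)		// brief settle delay
 *			;
 *		// then flush every way of the L3 line
 *		for (i = PN_L3_MAX_SET; i >= 0; i -= PN_L3_SET_SIZE)
 *			l3diag_load(l3_index + i);
 *	}
 */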

#endif	/* !lint */

/*
 * Fast ECC error at TL>0 handler
 * We get here via trap 70 at TL>0->Software trap 0 at TL>0.  We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 * For a complete description of the Fast ECC at TL>0 handling see the
 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 * us3_common_asm.s
 */
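/*
 * For orientation only, the handler below proceeds roughly as follows
 * (a pseudo-code summary of the annotated steps that follow):
 *
 *	save %g regs and D$/I$ state, turn D$/I$ off (CH_ERR_TL1_FECC_ENTER)
 *	capture the diagnostic logout; if nested too deeply, panic
 *	disable CEEN/NCEEN, displacement-flush the E$ (and Panther L2),
 *	    then restore CEEN/NCEEN
 *	flush and re-enable the D$ and I$ if they had been on
 *	optionally record a trap trace entry
 *	if (UCU or L3_UCU) plus WDU (and L3_WDU on Panther) were seen, panic
 *	otherwise log the error and retry (CH_ERR_TL1_EXIT)
 */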
#if defined(lint)

void
fast_ecc_tl1_err(void)
{}

#else	/* lint */

	.section ".text"
	.align	64
	ENTRY_NP(fast_ecc_tl1_err)

	/*
	 * This macro turns off the D$/I$ if they are on and saves their
	 * original state in ch_err_tl1_tmp, saves all the %g registers in the
	 * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
	 * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
	 * point to the ch_err_tl1_data structure and the original D$/I$ state
	 * will be saved in ch_err_tl1_tmp.  All %g registers except for %g1
	 * will be available.
	 */
	CH_ERR_TL1_FECC_ENTER;

	/*
	 * Get the diagnostic logout data.  %g4 must be initialized to
	 * current CEEN state, %g5 must point to logout structure in
	 * ch_err_tl1_data_t.  %g3 will contain the nesting count upon
	 * return.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g4
	and	%g4, EN_REG_CEEN, %g4
	add	%g1, CH_ERR_TL1_LOGOUT, %g5
	DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)

	/*
	 * If the logout nesting count is exceeded, we're probably
	 * not making any progress, so try to panic instead.
	 */
	cmp	%g3, CLO_NESTING_MAX
	bge	fecc_tl1_err
	  nop

	/*
	 * Save the current CEEN and NCEEN state in %g7 and turn them off
	 * before flushing the Ecache.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g7
	andn	%g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
	stxa	%g5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * Flush the Ecache, using the largest possible cache size with the
	 * smallest possible line size since we can't get the actual sizes
	 * from the cpu_node due to DTLB misses.
	 */
	PN_L2_FLUSHALL(%g3, %g4, %g5)

	set	CH_ECACHE_MAX_SIZE, %g4
	set	CH_ECACHE_MIN_LSIZE, %g5

	GET_CPU_IMPL(%g6)
	cmp	%g6, PANTHER_IMPL
	bne	%xcc, 2f
	  nop
	set	PN_L3_SIZE, %g4
2:
	mov	%g6, %g3
	CHP_ECACHE_FLUSHALL(%g4, %g5, %g3)

	/*
	 * Restore CEEN and NCEEN to the previous state.
	 */
	stxa	%g7, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * If we turned off the D$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_DC_ON, %g0
	bz	%xcc, 3f
	  nop

	/*
	 * Flush the D$.
	 */
	ASM_LD(%g4, dcache_size)
	ASM_LD(%g5, dcache_linesize)
	CH_DCACHE_FLUSHALL(%g4, %g5, %g6)

	/*
	 * Turn the D$ back on.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_DC, %g3
	stxa	%g3, [%g0]ASI_DCU
	membar	#Sync
3:
	/*
	 * If we turned off the I$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_IC_ON, %g0
	bz	%xcc, 4f
	  nop

	/*
	 * Flush the I$.  Panther has different I$ parameters, and we
	 * can't access the logout I$ params without possibly generating
	 * an MMU miss.
	 */
	GET_CPU_IMPL(%g6)
	set	PN_ICACHE_SIZE, %g3
	set	CH_ICACHE_SIZE, %g4
	mov	CH_ICACHE_LSIZE, %g5
	cmp	%g6, PANTHER_IMPL
	movz	%xcc, %g3, %g4
	movz	%xcc, PN_ICACHE_LSIZE, %g5
	CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)

	/*
	 * Turn the I$ back on.  Changing DCU_IC requires flush.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_IC, %g3
	stxa	%g3, [%g0]ASI_DCU
	flush	%g0
4:

#ifdef TRAPTRACE
	/*
	 * Get current trap trace entry physical pointer.
	 */
	CPU_INDEX(%g6, %g5)
	sll	%g6, TRAPTR_SIZE_SHIFT, %g6
	set	trap_trace_ctl, %g5
	add	%g6, %g5, %g6
	ld	[%g6 + TRAPTR_LIMIT], %g5
	tst	%g5
	be	%icc, skip_traptrace
	  nop
	ldx	[%g6 + TRAPTR_PBASE], %g5
	ld	[%g6 + TRAPTR_OFFSET], %g4
	add	%g5, %g4, %g5

	/*
	 * Create trap trace entry.
	 */
	rd	%asi, %g7
	wr	%g0, TRAPTR_ASI, %asi
	rd	STICK, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g4
	stha	%g4, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g4
	stna	%g4, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi
	stna	%g4, [%g5 + TRAP_ENT_F2]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F3]%asi
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi
	wr	%g0, %g7, %asi

	/*
	 * Advance trap trace pointer.
	 */
	ld	[%g6 + TRAPTR_OFFSET], %g5
	ld	[%g6 + TRAPTR_LIMIT], %g4
	st	%g5, [%g6 + TRAPTR_LAST_OFFSET]
	add	%g5, TRAP_ENT_SIZE, %g5
	sub	%g4, TRAP_ENT_SIZE, %g4
	cmp	%g5, %g4
	movge	%icc, 0, %g5
	st	%g5, [%g6 + TRAPTR_OFFSET]
skip_traptrace:
#endif	/* TRAPTRACE */

	/*
	 * If the nesting count is not zero, skip all the AFSR/AFAR
	 * handling and just do the necessary cache-flushing.
	 */
	ldxa	[%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
	brnz	%g2, 6f
	  nop

	/*
	 * If a UCU or L3_UCU followed by a WDU has occurred, go ahead
	 * and panic since a UE will occur (on the retry) before the
	 * UCU and WDU messages are enqueued.  On a Panther processor,
	 * we also need to see an L3_WDU before panicking.  Note that
	 * we avoid accessing the _EXT ASIs if not on a Panther.
	 */
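	/*
	 * The tests below amount to the following pseudo-C (a readability
	 * sketch only; the real checks are the AFSR/AFSR_EXT bit probes
	 * that follow):
	 *
	 *	ucu = (sdw_afsr & C_AFSR_UCU) ||
	 *	    (panther && (sdw_afsr_ext & C_AFSR_L3_UCU));
	 *	wdu = ((saved_afsr | current_afsr) & C_AFSR_WDU);
	 *	l3_wdu = ((sdw_afsr_ext | current_afsr_ext) & C_AFSR_L3_WDU);
	 *	if (ucu && wdu && (!panther || l3_wdu))
	 *		goto fecc_tl1_err;	// panic
	 *	// otherwise fall through to CH_ERR_TL1_EXIT below
	 */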
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g3
	set	1, %g4
	sllx	%g4, C_AFSR_UCU_SHIFT, %g4
	btst	%g4, %g3		! UCU in original shadow AFSR?
	bnz	%xcc, 5f
	  nop
	GET_CPU_IMPL(%g6)
	cmp	%g6, PANTHER_IMPL
	bne	%xcc, 6f		! not Panther, no UCU, skip the rest
	  nop
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g3
	btst	C_AFSR_L3_UCU, %g3	! L3_UCU in original shadow AFSR_EXT?
	bz	%xcc, 6f		! neither UCU nor L3_UCU was seen
	  nop
5:
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4	! original AFSR
	ldxa	[%g0]ASI_AFSR, %g3	! current AFSR
	or	%g3, %g4, %g3		! %g3 = original + current AFSR
	set	1, %g4
	sllx	%g4, C_AFSR_WDU_SHIFT, %g4
	btst	%g4, %g3		! WDU in original or current AFSR?
	bz	%xcc, 6f		! no WDU, skip remaining tests
	  nop
	GET_CPU_IMPL(%g6)
	cmp	%g6, PANTHER_IMPL
	bne	%xcc, fecc_tl1_err	! if not Panther, panic (saw UCU, WDU)
	  nop
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g4 ! original AFSR_EXT
	set	ASI_AFSR_EXT_VA, %g6	! ASI of current AFSR_EXT
	ldxa	[%g6]ASI_AFSR, %g3	! value of current AFSR_EXT
	or	%g3, %g4, %g3		! %g3 = original + current AFSR_EXT
	btst	C_AFSR_L3_WDU, %g3	! L3_WDU in original or current AFSR_EXT?
	bnz	%xcc, fecc_tl1_err	! panic (saw L3_WDU and UCU or L3_UCU)
	  nop
6:
	/*
	 * We fall into this macro if we've successfully logged the error in
	 * the ch_err_tl1_data structure and want the PIL15 softint to pick
	 * it up and log it.  %g1 must point to the ch_err_tl1_data structure.
	 * Restores the %g registers and issues retry.
	 */
	CH_ERR_TL1_EXIT;

	/*
	 * Establish panic exit label.
	 */
	CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);

	SET_SIZE(fast_ecc_tl1_err)

#endif	/* lint */


#if defined(lint)
/*
 * scrubphys - Pass in the aligned physical memory address
 * that you want to scrub, along with the ecache set size.
 *
 *	1) Displacement flush the E$ line corresponding to %addr.
 *	   The first ldxa guarantees that the %addr is no longer in
 *	   M, O, or E (it goes to I, or to S if an instruction fetch
 *	   also happens).
 *	2) "Write" the data using a CAS %addr,%g0,%g0.
 *	   The casxa guarantees a transition from I to M or S to M.
 *	3) Displacement flush the E$ line corresponding to %addr.
 *	   The second ldxa pushes the M line out of the ecache, into the
 *	   writeback buffers, on the way to memory.
 *	4) The "membar #Sync" pushes the cache line out of the writeback
 *	   buffers onto the bus, finally on the way to DRAM.
 *
 * This is a modified version of the algorithm suggested by Gary Lauterbach.
 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 * as modified, but then we found out that for Spitfire, if it misses in the
 * E$ it will probably install as an M, but if it hits in the E$, then it
 * will stay E if the store doesn't happen. So the first displacement flush
 * should ensure that the CAS will miss in the E$.  Arrgh.
 */
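/*
 * A C-level sketch of the sequence implemented below, for illustration
 * only.  ecache_flush_line_asm() and casx_phys() are hypothetical
 * stand-ins for the flush macros and the "casxa [paddr]ASI_MEM"
 * instruction, and the PSTATE_IE interrupt masking and final
 * membar #Sync are shown schematically:
 *
 *	static void
 *	scrubphys_sketch(uint64_t paddr, int ec_set_size)
 *	{
 *		disable_interrupts();
 *		ecache_flush_line_asm(paddr, ec_set_size); // 1) now I or S
 *		casx_phys(paddr);			   // 2) force to M
 *		ecache_flush_line_asm(paddr, ec_set_size); // 3) push M line out
 *		restore_interrupts();
 *		membar_sync();				   // 4) drain writeback
 *	}
 */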
/* ARGSUSED */
void
scrubphys(uint64_t paddr, int ecache_set_size)
{}

#else	/* lint */
	ENTRY(scrubphys)
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	GET_CPU_IMPL(%o5)		! Panther Ecache is flushed differently
	cmp	%o5, PANTHER_IMPL
	bne	scrubphys_1
	  nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	scrubphys_2
	  nop
scrubphys_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
scrubphys_2:
	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value

	retl
	membar	#Sync			! move the data out of the load buffer
	SET_SIZE(scrubphys)

#endif	/* lint */


#if defined(lint)
/*
 * clearphys - Pass in the physical memory address of the checkblock
 * that you want to push out, cleared with a recognizable pattern,
 * from the ecache.
 *
 * To ensure that the ECC gets recalculated after the bad data is cleared,
 * we must write out enough data to fill the W$ line (64 bytes). So we read
 * in an entire ecache subblock's worth of data, and write it back out.
 * Then we overwrite the 16 bytes of bad data with the pattern.
 */
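/*
 * Schematically, the routine below does the following (a sketch in C
 * for illustration only; mem_read64() and mem_write64() are
 * hypothetical stand-ins for the "ldxa/stxa [...]ASI_MEM" accesses):
 *
 *	// with NCEEN off and interrupts disabled:
 *	base = paddr & ~(CH_ECACHE_SUBBLK_SIZE - 1);
 *	for (off = CH_ECACHE_SUBBLK_SIZE - 8; off >= 0; off -= 8)
 *		mem_write64(base + off, mem_read64(base + off));
 *	mem_write64(paddr, 0xbadecc00badecc01);		// overwrite the
 *	mem_write64(paddr + 8, 0xbadecc00badecc01);	// 16 bad bytes
 *	// displacement-flush the line (Panther or Cheetah+ variant),
 *	// clear the AFSR, then restore NCEEN and interrupts.
 */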
/* ARGSUSED */
void
clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
{
}

#else	/* lint */
	ENTRY(clearphys)
	/* turn off IE, AM bits */
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate

	/* turn off NCEEN */
	ldxa	[%g0]ASI_ESTATE_ERR, %o5
	andn	%o5, EN_REG_NCEEN, %o3
	stxa	%o3, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* align the address passed in on a 64-byte subblock boundary */
	mov	CH_ECACHE_SUBBLK_SIZE, %o2
	andn	%o0, (CH_ECACHE_SUBBLK_SIZE - 1), %g1

	/* move the good data into the W$ */
clearphys_1:
	subcc	%o2, 8, %o2
	ldxa	[%g1 + %o2]ASI_MEM, %g2
	bge	clearphys_1
	  stxa	%g2, [%g1 + %o2]ASI_MEM

	/* now overwrite the bad data */
	setx	0xbadecc00badecc01, %g1, %g2
	stxa	%g2, [%o0]ASI_MEM
	mov	8, %g1
	stxa	%g2, [%o0 + %g1]ASI_MEM

	GET_CPU_IMPL(%o3)		! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	clearphys_2
	  nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %g1)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	clearphys_3
	  nop
clearphys_2:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
clearphys_3:
	/* clear the AFSR */
	ldxa	[%g0]ASI_AFSR, %o1
	stxa	%o1, [%g0]ASI_AFSR
	membar	#Sync

	/* turn NCEEN back on */
	stxa	%o5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* return and re-enable IE and AM */
	retl
	  wrpr	%g0, %o4, %pstate
	SET_SIZE(clearphys)

#endif	/* lint */


#if defined(lint)
/*
 * Cheetah+ Ecache displacement flush the specified line from the E$
 *
 * For Panther, this means flushing the specified line from both the
 * L2 cache and L3 cache.
 *
 * Register usage:
 *	%o0 - 64 bit physical address for flushing
 *	%o1 - Ecache set size
 */
/*ARGSUSED*/
void
ecache_flush_line(uint64_t flushaddr, int ec_set_size)
{
}
#else	/* lint */
	ENTRY(ecache_flush_line)

	GET_CPU_IMPL(%o3)		! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	ecache_flush_line_1
	  nop

	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
	b	ecache_flush_line_2
	  nop
ecache_flush_line_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
ecache_flush_line_2:
	retl
	  nop
	SET_SIZE(ecache_flush_line)
#endif	/* lint */

#if defined(lint)
void
set_afsr_ext(uint64_t afsr_ext)
{
	afsr_ext = afsr_ext;
}
#else /* lint */

	ENTRY(set_afsr_ext)
	set	ASI_AFSR_EXT_VA, %o1
	stxa	%o0, [%o1]ASI_AFSR		! afsr_ext reg
	membar	#Sync
	retl
	nop
	SET_SIZE(set_afsr_ext)

#endif /* lint */


#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if an ITLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all ITLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the ITLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
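/*
 * The tlo_info word and the ITLB access index handed to the code below
 * are assembled roughly as follows (a C-form sketch of the assembly for
 * clarity; the constant and macro names are the ones actually used,
 * while va, pgsz, tag_access and tag_access_ext are illustrative
 * variables):
 *
 *	tlo_info = (tag_access_ext & PN_ITLB_PGSZ_MASK) |
 *	    (tag_access & TAGREAD_CTX_MASK) |
 *	    (1ULL << PN_TLO_INFO_IMMU_SHIFT);
 *	pgsz = (tlo_info & PN_ITLB_PGSZ_MASK) >> PN_ITLB_PGSZ_SHIFT;
 *	index = (PN_GET_TLB_INDEX(va, pgsz) << PN_TLB_ACC_IDX_SHIFT) |
 *	    PN_ITLB_T512;
 */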
void
itlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(itlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g2			! VA that caused the IMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page size
	set	PN_ITLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5
	and	%g4, %g5, %g4
	or	%g4, %g3, %g3			! 'or' in the trap context and
	mov	1, %g4				! add the IMMU flag to complete
	sllx	%g4, PN_TLO_INFO_IMMU_SHIFT, %g4
	or	%g4, %g3, %g3			! the tlo_info field for logout
	stxa	%g0, [MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Next, we calculate the TLB index value for the failing VA.
	 */
	mov	%g2, %g4			! We need the ITLB index
	set	PN_ITLB_PGSZ_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_ITLB_PGSZ_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_ITLB_T512, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Check to see if the logout structure is available.
	 */
	set	CHPR_TLB_LOGOUT, %g6
	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, itlb_parity_trap_1)
	set	LOGOUT_INVALID_U32, %g6
	sllx	%g6, 32, %g6			! if our logout structure is
	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
	or	%g5, %g6, %g5			! already being used, then we
	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
	cmp	%g6, %g5			! information before clearing
	bne	itlb_parity_trap_1		! and logging the error.
	  nop

	/*
	 * Record the logout information. %g4 contains our index + TLB ID
	 * for use in ASI_ITLB_ACCESS and ASI_ITLB_TAGREAD. %g1 contains
	 * the pointer to our logout struct.
	 */
	stx	%g3, [%g1 + PN_TLO_INFO]
	stx	%g2, [%g1 + PN_TLO_ADDR]
	stx	%g2, [%g1 + PN_TLO_PC]		! %tpc == fault addr for IMMU

	add	%g1, PN_TLO_ITLB_TTE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4
	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	andn	%g4, %g6, %g4			! back to way 0

itlb_parity_trap_1:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Here we will clear the errors from the TLB.
	 */
	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
	stxa	%g0, [%g5]ASI_IMMU		! 0 as it will be invalid.
	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write the data and tag
	membar	#Sync

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4

	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write same data and tag
	membar	#Sync

	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
	flush   %g6				! flush after writing MMU regs

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
	 * already at PIL 15.
	 */
	set	cpu_tlb_parity_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	movl	%icc, PIL_14, %g4
	ba	sys_trap
	  nop
	SET_SIZE(itlb_parity_trap)

#endif	/* lint */

#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if a DTLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all DTLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the DTLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
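/*
 * Unlike the ITLB case, the two data T512 TLBs (T512_0 and T512_1) can
 * be programmed with different page sizes, so the handler below builds
 * tlo_info from both page-size fields and computes a separate access
 * index for each TLB.  Schematically (a C-form sketch of the code
 * below; va, tag_access, tag_access_ext, pgsz0/1 and idx0/1 are
 * illustrative variables, the constants are the ones actually used):
 *
 *	tlo_info = (tag_access_ext & PN_DTLB_PGSZ_MASK) |
 *	    (tag_access & TAGREAD_CTX_MASK);
 *	pgsz0 = (tlo_info & PN_DTLB_PGSZ0_MASK) >> PN_DTLB_PGSZ0_SHIFT;
 *	pgsz1 = (tlo_info & PN_DTLB_PGSZ1_MASK) >> PN_DTLB_PGSZ1_SHIFT;
 *	idx0 = (PN_GET_TLB_INDEX(va, pgsz0) << PN_TLB_ACC_IDX_SHIFT) |
 *	    PN_DTLB_T512_0;
 *	idx1 = (PN_GET_TLB_INDEX(va, pgsz1) << PN_TLB_ACC_IDX_SHIFT) |
 *	    PN_DTLB_T512_1;
 */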
void
dtlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(dtlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[MMU_SFAR]%asi, %g2		! VA that caused the DMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page sizes
	set	PN_DTLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5		! 'or' in the trap context
	and	%g4, %g5, %g4			! to complete the tlo_info
	or	%g4, %g3, %g3			! field for logout
	stxa	%g0, [MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Calculate the TLB index values for the failing VA. Since the T512
	 * TLBs can be configured for different page sizes, we need to find
	 * the index into each one separately.
	 */
	mov	%g2, %g4			! First we get the DTLB_0 index
	set	PN_DTLB_PGSZ0_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ0_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the DTLB_0 index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_DTLB_T512_0, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID

	mov	%g2, %g7			! Next we get the DTLB_1 index
	set	PN_DTLB_PGSZ1_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ1_SHIFT, %g5
	PN_GET_TLB_INDEX(%g7, %g5)		! %g7 has the DTLB_1 index
	sllx	%g7, PN_TLB_ACC_IDX_SHIFT, %g7	! shift the index into place
	set	PN_DTLB_T512_1, %g5
	or	%g7, %g5, %g7			! and add in the TLB ID

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * If this trap happened at TL>0, then we don't want to mess
	 * with the normal logout struct since that could cause a TLB
	 * miss.
	 */
	rdpr	%tl, %g6			! read current trap level
	cmp	%g6, 1				! skip over the tl>1 code
	ble	dtlb_parity_trap_1		! if TL <= 1.
	  nop

	/*
	 * If we are here, then the trap happened at TL>1. Simply
	 * update our tlo_info field and then skip to the TLB flush
	 * code.
	 */
	mov	1, %g6
	sllx	%g6, PN_TLO_INFO_TL1_SHIFT, %g6
	or	%g6, %g3, %g3
	ba	dtlb_parity_trap_2
	  nop

dtlb_parity_trap_1:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * Check to see if the logout structure is available.
	 */
	set	CHPR_TLB_LOGOUT, %g6
	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, dtlb_parity_trap_2)
	set	LOGOUT_INVALID_U32, %g6
	sllx	%g6, 32, %g6			! if our logout structure is
	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
	or	%g5, %g6, %g5			! already being used, then we
	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
	cmp	%g6, %g5			! information before clearing
	bne	dtlb_parity_trap_2		! and logging the error.
	  nop

	/*
	 * Record the logout information. %g4 contains our DTLB_0
	 * index + TLB ID and %g7 contains our DTLB_1 index + TLB ID
	 * both of which will be used for ASI_DTLB_ACCESS and
	 * ASI_DTLB_TAGREAD. %g1 contains the pointer to our logout
	 * struct.
	 */
	stx	%g3, [%g1 + PN_TLO_INFO]
	stx	%g2, [%g1 + PN_TLO_ADDR]
	rdpr	%tpc, %g5
	stx	%g5, [%g1 + PN_TLO_PC]

	add	%g1, PN_TLO_DTLB_TTE, %g1	! move up the pointer

	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 0 and store it away
	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 0 and store it away

	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 0
	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4			! of each TLB.
	or	%g7, %g6, %g7
	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer

	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 1 and store it away
	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 1 and store it away

	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 1
	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

	andn	%g4, %g6, %g4			! back to way 0
	andn	%g7, %g6, %g7			! back to way 0

dtlb_parity_trap_2:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * Here we will clear the errors from the DTLB.
	 */
	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
	stxa	%g0, [%g5]ASI_DMMU		! 0 as it will be invalid.
	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write the data and tag.
	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 0
	membar	#Sync

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4
	or	%g7, %g6, %g7

	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write same data and tag.
	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 1
	membar	#Sync

	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
	flush   %g6				! flush after writing MMU regs

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
	 * already at PIL 15. We do this even for TL>1 traps since
	 * those will lead to a system panic.
	 */
	set	cpu_tlb_parity_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	movl	%icc, PIL_14, %g4
	ba	sys_trap
	  nop
	SET_SIZE(dtlb_parity_trap)

#endif	/* lint */


#if defined(lint)
/*
 * Calculates the Panther TLB index based on a virtual address and page size
 *
 * Register usage:
 *	%o0 - virtual address whose index we want
 *	%o1 - Page Size of the TLB in question as encoded in the
 *	      ASI_[D|I]MMU_TAG_ACCESS_EXT register.
 */
uint64_t
pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
{
	return ((va + pg_sz)-(va + pg_sz));
}
#else	/* lint */
	ENTRY(pn_get_tlb_index)

	PN_GET_TLB_INDEX(%o0, %o1)

	retl
	  nop
	SET_SIZE(pn_get_tlb_index)
#endif	/* lint */


#if defined(lint)
/*
 * For Panther CPUs we need to flush the IPB after any I$ or D$
 * parity errors are detected.
 */
void
flush_ipb(void)
{ return; }

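/*
 * The loop below simply writes a zero tag to every IPB line.  In C it
 * would look roughly like this (sketch only; ipb_tag_write() is a
 * hypothetical stand-in for the "stxa ... ASI_IPB_TAG" store):
 *
 *	for (addr = 0; addr <= PN_IPB_TAG_ADDR_MAX;
 *	    addr += PN_IPB_TAG_ADDR_LINESIZE)
 *		ipb_tag_write(addr, 0);
 *	// followed by a flush instruction to synchronize
 */
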
#else	/* lint */

	ENTRY(flush_ipb)
	clr	%o0

flush_ipb_1:
	stxa	%g0, [%o0]ASI_IPB_TAG
	membar	#Sync
	cmp	%o0, PN_IPB_TAG_ADDR_MAX
	blt	flush_ipb_1
	  add	%o0, PN_IPB_TAG_ADDR_LINESIZE, %o0

	sethi	%hi(FLUSH_ADDR), %o0
	flush   %o0
	retl
	nop
	SET_SIZE(flush_ipb)

#endif	/* lint */

1028