/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Cheetah+ module
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/trap.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cheetahasm.h>
#include <sys/cmpregs.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */


#if !defined(lint)

	.global retire_l2_start
	.global retire_l2_end
	.global unretire_l2_start
	.global unretire_l2_end
	.global retire_l3_start
	.global retire_l3_end
	.global unretire_l3_start
	.global unretire_l3_end

/* BEGIN CSTYLED */

/*
 * Cheetah+ version to reflush an Ecache line by index.
 *
 * By default we assume the Ecache is 2-way, so we flush both
 * ways.  Even if the cache is direct-mapped, no harm will come
 * from performing the flush twice, apart from perhaps a performance
 * penalty.
 *
 * XXX - scr2 not used.
 */
#define	ECACHE_REFLUSH_LINE(ec_set_size, index, scr2)			\
	ldxa	[index]ASI_EC_DIAG, %g0;				\
	ldxa	[index + ec_set_size]ASI_EC_DIAG, %g0;

/*
 * Cheetah+ version of ecache_flush_line.  Uses the Cheetah+ Ecache
 * Displacement Flush feature.
 */
#define	ECACHE_FLUSH_LINE(physaddr, ec_set_size, scr1, scr2)		\
	sub	ec_set_size, 1, scr1;					\
	and	physaddr, scr1, scr1;					\
	set	CHP_ECACHE_IDX_DISP_FLUSH, scr2;			\
	or	scr2, scr1, scr1;					\
	ECACHE_REFLUSH_LINE(ec_set_size, scr1, scr2)

/* END CSTYLED */
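
/*
 * Illustrative sketch only (not assembled): in C-like pseudocode, the pair
 * of macros above computes a displacement-flush index and touches it in
 * both ways.  The volatile reads stand in for the ldxa/ASI_EC_DIAG
 * accesses and are an assumption for illustration, not kernel API:
 *
 *	uint64_t idx = (physaddr & (ec_set_size - 1)) |
 *	    CHP_ECACHE_IDX_DISP_FLUSH;
 *	(void) *(volatile uint64_t *)idx;			// way 0
 *	(void) *(volatile uint64_t *)(idx + ec_set_size);	// way 1
 */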

/*
 * Panther version to reflush a line from both the L2 cache and L3
 * cache by the respective indexes. Flushes all ways of the line from
 * each cache.
 *
 * l2_index	Index into the L2$ of the line to be flushed. This
 *		register will not be modified by this routine.
 * l3_index	Index into the L3$ of the line to be flushed. This
 *		register will not be modified by this routine.
 * scr2		scratch register.
 * scr3		scratch register.
 *
 */
#define	PN_ECACHE_REFLUSH_LINE(l2_index, l3_index, scr2, scr3)		\
	set	PN_L2_MAX_SET, scr2;					\
	set	PN_L2_SET_SIZE, scr3;					\
1:									\
	ldxa	[l2_index + scr2]ASI_L2_TAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	1b;							\
	  sub	scr2, scr3, scr2;					\
	mov	6, scr2;						\
7:									\
	cmp	scr2, %g0;						\
	bg,a	7b;							\
	  sub	scr2, 1, scr2;						\
	set	PN_L3_MAX_SET, scr2;					\
	set	PN_L3_SET_SIZE, scr3;					\
2:									\
	ldxa	[l3_index + scr2]ASI_EC_DIAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	2b;							\
	  sub	scr2, scr3, scr2;

#define	PN_L2_REFLUSH_LINE(l2_index, scr2, scr3)			\
	set	PN_L2_MAX_SET, scr2;					\
	set	PN_L2_SET_SIZE, scr3;					\
1:									\
	ldxa	[l2_index + scr2]ASI_L2_TAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	1b;							\
	  sub	scr2, scr3, scr2;

/*
 * Panther version of ecache_flush_line. Flushes the line corresponding
 * to physaddr from both the L2 cache and the L3 cache.
 *
 * physaddr	Input: Physical address to flush.
 *              Output: Physical address to flush (preserved).
 * l2_idx_out	Input: scratch register.
 *              Output: Index into the L2$ of the line to be flushed.
 * l3_idx_out	Input: scratch register.
 *              Output: Index into the L3$ of the line to be flushed.
 * scr3		scratch register.
 * scr4		scratch register.
 *
 */
#define	PN_ECACHE_FLUSH_LINE(physaddr, l2_idx_out, l3_idx_out, scr3, scr4)	\
	set	PN_L3_SET_SIZE, l2_idx_out;					\
	sub	l2_idx_out, 1, l2_idx_out;					\
	and	physaddr, l2_idx_out, l3_idx_out;				\
	set	PN_L3_IDX_DISP_FLUSH, l2_idx_out;				\
	or	l2_idx_out, l3_idx_out, l3_idx_out;				\
	set	PN_L2_SET_SIZE, l2_idx_out;					\
	sub	l2_idx_out, 1, l2_idx_out;					\
	and	physaddr, l2_idx_out, l2_idx_out;				\
	set	PN_L2_IDX_DISP_FLUSH, scr3;					\
	or	l2_idx_out, scr3, l2_idx_out;					\
	PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)
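
/*
 * Hedged sketch (illustration only, not assembled): PN_ECACHE_FLUSH_LINE
 * computes one displacement-flush index per cache from the same physical
 * address, then PN_ECACHE_REFLUSH_LINE walks every way by stepping each
 * index from the maximum set offset down to zero (a short countdown delay
 * between the two loops is omitted here).  The l2_tag_read()/l3_diag_read()
 * helpers are hypothetical stand-ins for the diagnostic ASI loads:
 *
 *	l3 = (pa & (PN_L3_SET_SIZE - 1)) | PN_L3_IDX_DISP_FLUSH;
 *	l2 = (pa & (PN_L2_SET_SIZE - 1)) | PN_L2_IDX_DISP_FLUSH;
 *	for (off = PN_L2_MAX_SET; off >= 0; off -= PN_L2_SET_SIZE)
 *		(void) l2_tag_read(l2 + off);		// ASI_L2_TAG
 *	for (off = PN_L3_MAX_SET; off >= 0; off -= PN_L3_SET_SIZE)
 *		(void) l3_diag_read(l3 + off);		// ASI_EC_DIAG
 */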

/*
 * Panther version of L2_flush_line. Flushes the line corresponding
 * to physaddr from the L2 cache.
 *
 * physaddr	Input: Physical address to flush.
 *              Output: Physical address to flush (preserved).
 * l2_idx_out	Input: scratch register.
 *              Output: Index into the L2$ of the line to be flushed.
 * scr3		scratch register.
 *
 */
#define	PN_L2_FLUSH_LINE(physaddr, l2_idx_out, scr2, scr3)	\
	set	PN_L2_SET_SIZE, l2_idx_out;		\
	sub	l2_idx_out, 1, l2_idx_out;		\
	and	physaddr, l2_idx_out, l2_idx_out;	\
	set	PN_L2_IDX_DISP_FLUSH, scr3;		\
	or	l2_idx_out, scr3, l2_idx_out;		\
	PN_L2_REFLUSH_LINE(l2_idx_out, scr2, scr3)

#endif	/* !lint */

/*
 * Fast ECC error at TL>0 handler
 * We get here via trap 70 at TL>0->Software trap 0 at TL>0.  We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 * For a complete description of the Fast ECC at TL>0 handling see the
 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 * us3_common_asm.s
 */
#if defined(lint)

void
fast_ecc_tl1_err(void)
{}

#else	/* lint */

	.section ".text"
	.align	64
	ENTRY_NP(fast_ecc_tl1_err)

	/*
	 * This macro turns off the D$/I$ if they are on and saves their
	 * original state in ch_err_tl1_tmp, saves all the %g registers in the
	 * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
	 * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
	 * point to the ch_err_tl1_data structure and the original D$/I$ state
	 * will be saved in ch_err_tl1_tmp.  All %g registers except for %g1
	 * will be available.
	 */
	CH_ERR_TL1_FECC_ENTER;

	/*
	 * Get the diagnostic logout data.  %g4 must be initialized to
	 * current CEEN state, %g5 must point to logout structure in
	 * ch_err_tl1_data_t.  %g3 will contain the nesting count upon
	 * return.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g4
	and	%g4, EN_REG_CEEN, %g4
	add	%g1, CH_ERR_TL1_LOGOUT, %g5
	DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)

	/*
	 * If the logout nesting count is exceeded, we're probably
	 * not making any progress; try to panic instead.
	 */
	cmp	%g3, CLO_NESTING_MAX
	bge	fecc_tl1_err
	  nop

	/*
	 * Save the current CEEN and NCEEN state in %g7 and turn them off
	 * before flushing the Ecache.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g7
	andn	%g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
	stxa	%g5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * Flush the Ecache, using the largest possible cache size with the
	 * smallest possible line size, since we can't get the actual sizes
	 * from the cpu_node due to DTLB misses.
	 */
	PN_L2_FLUSHALL(%g3, %g4, %g5)

	set	CH_ECACHE_MAX_SIZE, %g4
	set	CH_ECACHE_MIN_LSIZE, %g5

	GET_CPU_IMPL(%g6)
	cmp	%g6, PANTHER_IMPL
	bne	%xcc, 2f
	  nop
	set	PN_L3_SIZE, %g4
2:
	mov	%g6, %g3
	CHP_ECACHE_FLUSHALL(%g4, %g5, %g3)

	/*
	 * Restore CEEN and NCEEN to the previous state.
	 */
	stxa	%g7, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * If we turned off the D$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_DC_ON, %g0
	bz	%xcc, 3f
	  nop

	/*
	 * Flush the D$.
	 */
	ASM_LD(%g4, dcache_size)
	ASM_LD(%g5, dcache_linesize)
	CH_DCACHE_FLUSHALL(%g4, %g5, %g6)

	/*
	 * Turn the D$ back on.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_DC, %g3
	stxa	%g3, [%g0]ASI_DCU
	membar	#Sync
3:
	/*
	 * If we turned off the I$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_IC_ON, %g0
	bz	%xcc, 4f
	  nop

	/*
	 * Flush the I$.  Panther has different I$ parameters, and we
	 * can't access the logout I$ params without possibly generating
	 * an MMU miss.
	 */
	GET_CPU_IMPL(%g6)
	set	PN_ICACHE_SIZE, %g3
	set	CH_ICACHE_SIZE, %g4
	mov	CH_ICACHE_LSIZE, %g5
	cmp	%g6, PANTHER_IMPL
	movz	%xcc, %g3, %g4
	movz	%xcc, PN_ICACHE_LSIZE, %g5
	CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)

	/*
	 * Turn the I$ back on.  Changing DCU_IC requires a flush.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_IC, %g3
	stxa	%g3, [%g0]ASI_DCU
	flush	%g0
4:

#ifdef TRAPTRACE
	/*
	 * Get current trap trace entry physical pointer.
	 */
	CPU_INDEX(%g6, %g5)
	sll	%g6, TRAPTR_SIZE_SHIFT, %g6
	set	trap_trace_ctl, %g5
	add	%g6, %g5, %g6
	ld	[%g6 + TRAPTR_LIMIT], %g5
	tst	%g5
	be	%icc, skip_traptrace
	  nop
	ldx	[%g6 + TRAPTR_PBASE], %g5
	ld	[%g6 + TRAPTR_OFFSET], %g4
	add	%g5, %g4, %g5

	/*
	 * Create trap trace entry.
	 */
	rd	%asi, %g7
	wr	%g0, TRAPTR_ASI, %asi
	rd	STICK, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g4
	stha	%g4, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g4
	stna	%g4, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi
	stna	%g4, [%g5 + TRAP_ENT_F2]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F3]%asi
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi
	wr	%g0, %g7, %asi

	/*
	 * Advance trap trace pointer.
	 */
	ld	[%g6 + TRAPTR_OFFSET], %g5
	ld	[%g6 + TRAPTR_LIMIT], %g4
	st	%g5, [%g6 + TRAPTR_LAST_OFFSET]
	add	%g5, TRAP_ENT_SIZE, %g5
	sub	%g4, TRAP_ENT_SIZE, %g4
	cmp	%g5, %g4
	movge	%icc, 0, %g5
	st	%g5, [%g6 + TRAPTR_OFFSET]
skip_traptrace:
#endif	/* TRAPTRACE */

	/*
	 * If the nesting count is not zero, skip all the AFSR/AFAR
	 * handling and just do the necessary cache-flushing.
	 */
	ldxa	[%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
	brnz	%g2, 6f
	  nop

	/*
	 * If a UCU or L3_UCU followed by a WDU has occurred, go ahead
	 * and panic, since a UE will occur (on the retry) before the
	 * UCU and WDU messages are enqueued.
	 */
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g3
	set	1, %g4
	sllx	%g4, C_AFSR_UCU_SHIFT, %g4
	btst	%g4, %g3		! UCU in original shadow AFSR?
	bnz	%xcc, 5f
	  mov	1, %g4
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g3
	sllx	%g4, C_AFSR_L3_UCU_SHIFT, %g4
	btst	%g4, %g3		! L3_UCU in original shadow AFSR_EXT?
	bz	%xcc, 6f
	  nop
5:
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4	! original AFSR
	ldxa	[%g0]ASI_AFSR, %g3	! current AFSR
	or	%g3, %g4, %g3		! %g3 = original + current AFSR
	set	1, %g4
	sllx	%g4, C_AFSR_WDU_SHIFT, %g4
	btst	%g4, %g3		! WDU in original or current AFSR?
	bnz	%xcc, fecc_tl1_err
	  nop

6:
	/*
	 * We fall into this macro if we've successfully logged the error in
	 * the ch_err_tl1_data structure and want the PIL15 softint to pick
	 * it up and log it.  %g1 must point to the ch_err_tl1_data structure.
	 * Restores the %g registers and issues retry.
	 */
	CH_ERR_TL1_EXIT;

	/*
	 * Establish the panic exit label.
	 */
	CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);

	SET_SIZE(fast_ecc_tl1_err)

#endif	/* lint */


#if defined(lint)
/*
 * scrubphys - Pass in the aligned physical memory address
 * that you want to scrub, along with the ecache set size.
 *
 *	1) Displacement flush the E$ line corresponding to %addr.
 *	   The first ldxa guarantees that the %addr is no longer in
 *	   M, O, or E (it goes to I or S if an instruction fetch also
 *	   happens).
 *	2) "Write" the data using a CAS %addr,%g0,%g0.
 *	   The casxa guarantees a transition from I to M or S to M.
 *	3) Displacement flush the E$ line corresponding to %addr.
 *	   The second ldxa pushes the M line out of the ecache, into the
 *	   writeback buffers, on the way to memory.
 *	4) The "membar #Sync" pushes the cache line out of the writeback
 *	   buffers onto the bus, on the way to dram finally.
 *
 * This is a modified version of the algorithm suggested by Gary Lauterbach.
 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 * as modified, but then we found out that for spitfire, if it misses in the
 * E$ it will probably install as an M, but if it hits in the E$, then it
 * will stay E, if the store doesn't happen. So the first displacement flush
 * should ensure that the CAS will miss in the E$.  Arrgh.
 */
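/*
 * Hedged C-level sketch of the sequence above (illustration only; the
 * helpers are hypothetical stand-ins for the flush macros and ASI
 * accesses, not kernel API):
 *
 *	pstate = disable_interrupts();
 *	ecache_flush_line_by_disp(paddr);	// 1) line leaves M/O/E
 *	casxa_phys(paddr, 0, 0);		// 2) CAS forces I->M (S->M)
 *	ecache_reflush_line_by_disp(paddr);	// 3) M line -> writeback buffers
 *	restore_interrupts(pstate);
 *	membar_sync();				// 4) drain buffers toward DRAM
 */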
/* ARGSUSED */
void
scrubphys(uint64_t paddr, int ecache_set_size)
{}

#else	/* lint */
	ENTRY(scrubphys)
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	GET_CPU_IMPL(%o5)		! Panther Ecache is flushed differently
	cmp	%o5, PANTHER_IMPL
	bne	scrubphys_1
	  nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	scrubphys_2
	  nop
scrubphys_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
scrubphys_2:
	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value

	retl
	membar	#Sync			! move the data out of the load buffer
	SET_SIZE(scrubphys)

#endif	/* lint */


#if defined(lint)
/*
 * clearphys - Pass in the physical memory address of the checkblock
 * that you want to push out of the ecache, cleared with a recognizable
 * pattern.
 *
 * To ensure that the ecc gets recalculated after the bad data is cleared,
 * we must write out enough data to fill the w$ line (64 bytes). So we read
 * in an entire ecache subblock's worth of data, and write it back out.
 * Then we overwrite the 16 bytes of bad data with the pattern.
 */
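/*
 * Hedged sketch of the data movement described above (illustration only;
 * ldphys()/stphys() are hypothetical stand-ins for the ASI_MEM accesses):
 *
 *	base = paddr & ~(CH_ECACHE_SUBBLK_SIZE - 1);
 *	for (i = CH_ECACHE_SUBBLK_SIZE - 8; i >= 0; i -= 8)
 *		stphys(base + i, ldphys(base + i));	// fill the W$ line
 *	stphys(paddr, 0xbadecc00badecc01ULL);		// overwrite the bad
 *	stphys(paddr + 8, 0xbadecc00badecc01ULL);	// 16 bytes of data
 */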
/* ARGSUSED */
void
clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
{
}

#else	/* lint */
	ENTRY(clearphys)
	/* turn off IE, AM bits */
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate

	/* turn off NCEEN */
	ldxa	[%g0]ASI_ESTATE_ERR, %o5
	andn	%o5, EN_REG_NCEEN, %o3
	stxa	%o3, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* align the address passed in with the 64-byte subblock size */
	mov	CH_ECACHE_SUBBLK_SIZE, %o2
	andn	%o0, (CH_ECACHE_SUBBLK_SIZE - 1), %g1

	/* move the good data into the W$ */
clearphys_1:
	subcc	%o2, 8, %o2
	ldxa	[%g1 + %o2]ASI_MEM, %g2
	bge	clearphys_1
	  stxa	%g2, [%g1 + %o2]ASI_MEM

	/* now overwrite the bad data */
	setx	0xbadecc00badecc01, %g1, %g2
	stxa	%g2, [%o0]ASI_MEM
	mov	8, %g1
	stxa	%g2, [%o0 + %g1]ASI_MEM

	GET_CPU_IMPL(%o3)		! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	clearphys_2
	  nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %g1)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	clearphys_3
	  nop
clearphys_2:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
clearphys_3:
	/* clear the AFSR */
	ldxa	[%g0]ASI_AFSR, %o1
	stxa	%o1, [%g0]ASI_AFSR
	membar	#Sync

	/* turn NCEEN back on */
	stxa	%o5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* return and re-enable IE and AM */
	retl
	  wrpr	%g0, %o4, %pstate
	SET_SIZE(clearphys)

#endif	/* lint */


#if defined(lint)
/*
 * Cheetah+ Ecache displacement flush the specified line from the E$
 *
 * For Panther, this means flushing the specified line from both the
 * L2 cache and L3 cache.
 *
 * Register usage:
 *	%o0 - 64 bit physical address for flushing
 *	%o1 - Ecache set size
 */
/*ARGSUSED*/
void
ecache_flush_line(uint64_t flushaddr, int ec_set_size)
{
}
#else	/* lint */
	ENTRY(ecache_flush_line)

	GET_CPU_IMPL(%o3)		! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	ecache_flush_line_1
	  nop

	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
	b	ecache_flush_line_2
	  nop
ecache_flush_line_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
ecache_flush_line_2:
	retl
	  nop
	SET_SIZE(ecache_flush_line)
#endif	/* lint */

#if defined(lint)
void
set_afsr_ext(uint64_t afsr_ext)
{
	afsr_ext = afsr_ext;
}
#else /* lint */

	ENTRY(set_afsr_ext)
	set	ASI_AFSR_EXT_VA, %o1
	stxa	%o0, [%o1]ASI_AFSR		! afsr_ext reg
	membar	#Sync
	retl
	nop
	SET_SIZE(set_afsr_ext)

#endif /* lint */


#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if an ITLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all ITLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the ITLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
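/*
 * Hedged sketch of how the tlo_info parameter (%g3) is assembled below,
 * in C-like pseudocode (the register accessors are hypothetical):
 *
 *	tlo_info = (immu_tag_access_ext() & PN_ITLB_PGSZ_MASK) |
 *	    (immu_tag_access() & TAGREAD_CTX_MASK) |
 *	    (1ULL << PN_TLO_INFO_IMMU_SHIFT);
 */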
void
itlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(itlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g2			! VA that caused the IMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page size
	set	PN_ITLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5
	and	%g4, %g5, %g4
	or	%g4, %g3, %g3			! 'or' in the trap context and
	mov	1, %g4				! add the IMMU flag to complete
	sllx	%g4, PN_TLO_INFO_IMMU_SHIFT, %g4
	or	%g4, %g3, %g3			! the tlo_info field for logout
	stxa	%g0, [MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Next, we calculate the TLB index value for the failing VA.
	 */
	mov	%g2, %g4			! We need the ITLB index
	set	PN_ITLB_PGSZ_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_ITLB_PGSZ_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_ITLB_T512, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Check to see if the logout structure is available.
	 */
	set	CHPR_TLB_LOGOUT, %g6
	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, itlb_parity_trap_1)
	set	LOGOUT_INVALID_U32, %g6
	sllx	%g6, 32, %g6			! if our logout structure is
	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
	or	%g5, %g6, %g5			! already being used, then we
	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
	cmp	%g6, %g5			! information before clearing
	bne	itlb_parity_trap_1		! and logging the error.
	  nop

	/*
	 * Record the logout information. %g4 contains our index + TLB ID
	 * for use in ASI_ITLB_ACCESS and ASI_ITLB_TAGREAD. %g1 contains
	 * the pointer to our logout struct.
	 */
	stx	%g3, [%g1 + PN_TLO_INFO]
	stx	%g2, [%g1 + PN_TLO_ADDR]
	stx	%g2, [%g1 + PN_TLO_PC]		! %tpc == fault addr for IMMU

	add	%g1, PN_TLO_ITLB_TTE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4
	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	andn	%g4, %g6, %g4			! back to way 0

itlb_parity_trap_1:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Here we will clear the errors from the TLB.
	 */
	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
	stxa	%g0, [%g5]ASI_IMMU		! 0 as it will be invalid.
	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write the data and tag
	membar	#Sync

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4

	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write same data and tag
	membar	#Sync

	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
	flush   %g6				! flush after writing MMU regs

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
	 * already at PIL 15.
	 */
	set	cpu_tlb_parity_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	movl	%icc, PIL_14, %g4
	ba	sys_trap
	  nop
	SET_SIZE(itlb_parity_trap)

#endif	/* lint */

#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if a DTLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all DTLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the DTLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
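/*
 * Hedged sketch (illustration only): unlike the ITLB case, two access
 * indexes are computed below, because the two data T512 TLBs can each be
 * configured with a different page size:
 *
 *	pgsz0 = (tlo_info & PN_DTLB_PGSZ0_MASK) >> PN_DTLB_PGSZ0_SHIFT;
 *	pgsz1 = (tlo_info & PN_DTLB_PGSZ1_MASK) >> PN_DTLB_PGSZ1_SHIFT;
 *	idx0 = (pn_get_tlb_index(va, pgsz0) << PN_TLB_ACC_IDX_SHIFT) |
 *	    PN_DTLB_T512_0;
 *	idx1 = (pn_get_tlb_index(va, pgsz1) << PN_TLB_ACC_IDX_SHIFT) |
 *	    PN_DTLB_T512_1;
 */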
void
dtlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(dtlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[MMU_SFAR]%asi, %g2		! VA that caused the DMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page sizes
	set	PN_DTLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5		! 'or' in the trap context
	and	%g4, %g5, %g4			! to complete the tlo_info
	or	%g4, %g3, %g3			! field for logout
	stxa	%g0, [MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Calculate the TLB index values for the failing VA. Since the T512
	 * TLBs can be configured for different page sizes, we need to find
	 * the index into each one separately.
	 */
	mov	%g2, %g4			! First we get the DTLB_0 index
	set	PN_DTLB_PGSZ0_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ0_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the DTLB_0 index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_DTLB_T512_0, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID

	mov	%g2, %g7			! Next we get the DTLB_1 index
	set	PN_DTLB_PGSZ1_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ1_SHIFT, %g5
	PN_GET_TLB_INDEX(%g7, %g5)		! %g7 has the DTLB_1 index
	sllx	%g7, PN_TLB_ACC_IDX_SHIFT, %g7	! shift the index into place
	set	PN_DTLB_T512_1, %g5
	or	%g7, %g5, %g7			! and add in the TLB ID

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * If this trap happened at TL>0, then we don't want to mess
	 * with the normal logout struct since that could cause a TLB
	 * miss.
	 */
	rdpr	%tl, %g6			! read current trap level
	cmp	%g6, 1				! skip over the tl>1 code
	ble	dtlb_parity_trap_1		! if TL <= 1.
	  nop

	/*
	 * If we are here, then the trap happened at TL>1. Simply
	 * update our tlo_info field and then skip to the TLB flush
	 * code.
	 */
	mov	1, %g6
	sllx	%g6, PN_TLO_INFO_TL1_SHIFT, %g6
	or	%g6, %g3, %g3
	ba	dtlb_parity_trap_2
	  nop

dtlb_parity_trap_1:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * Check to see if the logout structure is available.
	 */
	set	CHPR_TLB_LOGOUT, %g6
	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, dtlb_parity_trap_2)
	set	LOGOUT_INVALID_U32, %g6
	sllx	%g6, 32, %g6			! if our logout structure is
	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
	or	%g5, %g6, %g5			! already being used, then we
	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
	cmp	%g6, %g5			! information before clearing
	bne	dtlb_parity_trap_2		! and logging the error.
	  nop

	/*
	 * Record the logout information. %g4 contains our DTLB_0
	 * index + TLB ID and %g7 contains our DTLB_1 index + TLB ID,
	 * both of which will be used for ASI_DTLB_ACCESS and
	 * ASI_DTLB_TAGREAD. %g1 contains the pointer to our logout
	 * struct.
	 */
	stx	%g3, [%g1 + PN_TLO_INFO]
	stx	%g2, [%g1 + PN_TLO_ADDR]
	rdpr	%tpc, %g5
	stx	%g5, [%g1 + PN_TLO_PC]

	add	%g1, PN_TLO_DTLB_TTE, %g1	! move up the pointer

	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 0 and store it away
	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 0 and store it away

	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 0
	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4			! of each TLB.
	or	%g7, %g6, %g7
	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer

	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 1 and store it away
	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 1 and store it away

	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 1
	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

	andn	%g4, %g6, %g4			! back to way 0
	andn	%g7, %g6, %g7			! back to way 0

dtlb_parity_trap_2:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * Here we will clear the errors from the DTLB.
	 */
	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
	stxa	%g0, [%g5]ASI_DMMU		! 0 as it will be invalid.
	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write the data and tag.
	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 0
	membar	#Sync

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4
	or	%g7, %g6, %g7

	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write same data and tag.
	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 1
	membar	#Sync

	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
	flush   %g6				! flush after writing MMU regs

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
	 * already at PIL 15. We do this even for TL>1 traps since
	 * those will lead to a system panic.
	 */
	set	cpu_tlb_parity_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	movl	%icc, PIL_14, %g4
	ba	sys_trap
	  nop
	SET_SIZE(dtlb_parity_trap)

#endif	/* lint */


#if defined(lint)
/*
 * Calculates the Panther TLB index based on a virtual address and page size
 *
 * Register usage:
 *	%o0 - virtual address whose index we want
 *	%o1 - Page Size of the TLB in question as encoded in the
 *	      ASI_[D|I]MMU_TAG_ACCESS_EXT register.
 */
uint64_t
pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
{
	return ((va + pg_sz) - (va + pg_sz));
}
#else	/* lint */
	ENTRY(pn_get_tlb_index)

	PN_GET_TLB_INDEX(%o0, %o1)

	retl
	  nop
	SET_SIZE(pn_get_tlb_index)
#endif	/* lint */


#if defined(lint)
/*
 * For Panther CPUs we need to flush the IPB after any I$ or D$
 * parity errors are detected.
 */
void
flush_ipb(void)
{ return; }
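
/*
 * Hedged C-level sketch of the loop below (illustration only;
 * ipb_tag_write() is a hypothetical stand-in for the ASI_IPB_TAG store):
 *
 *	for (addr = 0; ; addr += PN_IPB_TAG_ADDR_LINESIZE) {
 *		ipb_tag_write(addr, 0);
 *		if (addr >= PN_IPB_TAG_ADDR_MAX)
 *			break;
 *	}
 */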

#else	/* lint */

	ENTRY(flush_ipb)
	clr	%o0

flush_ipb_1:
	stxa	%g0, [%o0]ASI_IPB_TAG
	membar	#Sync
	cmp	%o0, PN_IPB_TAG_ADDR_MAX
	blt	flush_ipb_1
	  add	%o0, PN_IPB_TAG_ADDR_LINESIZE, %o0

	sethi	%hi(FLUSH_ADDR), %o0
	flush   %o0
	retl
	nop
	SET_SIZE(flush_ipb)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
casxa_physical_addr(uint64_t start_addr, uint64_t iteration_count)
{}

#else
	ENTRY(casxa_physical_addr)
1:
	casxa	[%o0]ASI_MEM, %g0, %g0
	brnz,pt	%o1, 1b
	 dec	%o1
	retl
	nop
	SET_SIZE(casxa_physical_addr)

#endif	/* lint */


#if defined(lint)

/*ARGSUSED*/
void
rw_physical_addr(uint64_t start_addr, uint64_t end_addr)
{}

#else
	ENTRY(rw_physical_addr)
	ldxa	[%o0]ASI_MEM, %g1
1:
	stxa	%g1, [%o0]ASI_MEM
	add	%o0, 8, %o0
	cmp	%o0, %o1
	blu,a,pt %xcc, 1b
	 ldxa	[%o0]ASI_MEM, %g1
	retl
	nop
	SET_SIZE(rw_physical_addr)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
read_from_physical_addr(uint64_t start_addr, uint64_t count, uint64_t buffer)
{}
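
/*
 * Hedged sketch of the copy loop below (illustration only; ldphys() is a
 * hypothetical stand-in for the ldxa/ASI_MEM access):
 *
 *	for (i = 0; i < count; i++)
 *		((uint64_t *)buffer)[i] = ldphys(start_addr + 8 * i);
 */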

#else
	ENTRY(read_from_physical_addr)
	clr	%o4
	ba,a	2f
1:
	stx	%g1, [%o2]
	add	%o0, 8, %o0
	add	%o2, 8, %o2
	add	%o4, 1, %o4
2:
	cmp	%o4, %o1
	blu,a,pt %xcc, 1b
	 ldxa	[%o0]ASI_MEM, %g1
	retl
	nop
	SET_SIZE(read_from_physical_addr)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
retire_l2(uint64_t tag_addr, uint64_t pattern)
{return 0;}

#else
	.align 4096
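/*
 * Return values below are inferred from how %o5 is set in this routine
 * (a reading aid, not from the original sources):
 *	 0 - line was invalid and has been retired (marked NA)
 *	 1 - line was already NA
 *	 2 - line retired after a second displacement flush
 *	-1 - line still valid after the second flush; retire failed
 */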
	ENTRY(retire_l2)
retire_l2_start:

	! since we disable interrupts, we don't need to do kpreempt_disable()
	rdpr	%pstate, %o2
	andn	%o2, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! disable interrupts
	/*
	 * Save the current DCU state.  Turn off IPS.
	 */
	setx	DCU_IPS_MASK, %g2, %o3
	ldxa	[%g0]ASI_DCU, %g1	! save DCU in %g1
	andn	%g1, %o3, %g4
	stxa	%g4, [%g0]ASI_DCU
	flush	%g0
	PARK_SIBLING_CORE(%g1, %o3, %o4)	! %g1 has DCU value
	clr	%o5	! assume success
8:
	PN_ECACHE_FLUSH_LINE(%o0, %o3, %o4, %g2, %g3)
1:
	! Check if the line is invalid; if so, NA it.
	ldxa	[%o0]ASI_L2_TAG, %o3
	btst	0x7, %o3
	bnz	%xcc, 2f
	 nop
	stxa	%o1, [%o0]ASI_L2_TAG
	membar #Sync	! still on the same cache line
	! now delay 15 cycles so we don't have a hazard when we return
	mov	16, %o1
1:
	brnz,pt	%o1, 1b
	 dec	%o1
9:
	! UNPARK_SIBLING_CORE is 7 instructions, so we cross a cache boundary
	UNPARK_SIBLING_CORE(%g1, %o3, %o4)	! 7 instructions
	/*
	 * Restore the DCU.
	 */
	stxa	%g1, [%g0]ASI_DCU
	flush	%g0
	wrpr	%g0, %o2, %pstate		! restore pstate
	retl
	 mov	%o5, %o0
2:
	! It is OK to have STATE as NA (if so, nothing to do!)
	and	%o3, 0x7, %o3
	cmp	%o3, 0x5
	be,a,pt	%xcc, 9b
	 mov	1, %o5	! indicate it was already NA
	! Hmm.	Not INV, not NA.
	cmp	%o5, 0
	be,a,pt	%xcc, 8b	! Flush the cacheline again
	 mov	2, %o5	! indicate a retry was done
	! We already flushed the cacheline a second time.  Return -1.
	clr	%o5
	ba	9b
	 dec	%o5
retire_l2_end:
	SET_SIZE(retire_l2)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
unretire_l2(uint64_t tag_addr)
{return 0;}

#else
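/*
 * Return values below are inferred from how %o5 is set in this routine
 * (a reading aid, not from the original sources):
 *	 0 - NA line was unretired (tag cleared to invalid)
 *	-1 - line was not in the NA state, so nothing was changed
 */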
	ENTRY(unretire_l2)
unretire_l2_start:

	! since we disable interrupts, we don't need to do kpreempt_disable()
	rdpr	%pstate, %o2
	andn	%o2, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! disable interrupts
	/*
	 * Save the current DCU state.  Turn off IPS.
	 */
	setx	DCU_IPS_MASK, %g2, %o3
	ldxa	[%g0]ASI_DCU, %g1	! save DCU in %g1
	andn	%g1, %o3, %g4
	stxa	%g4, [%g0]ASI_DCU
	flush	%g0	/* flush required after changing the IC bit */
	PARK_SIBLING_CORE(%g1, %o3, %o4)	! %g1 has DCU value

	PN_ECACHE_FLUSH_LINE(%o0, %o3, %o4, %o5, %g2)
1:
	clr	%o5	! assume success
	! Check that the line is in the NA state; if so, INV it.
	ldxa	[%o0]ASI_L2_TAG, %o3
	and	%o3, 0x7, %o3
	cmp	%o3, 0x5
	bne,a,pt %xcc, 9f	! Wasn't NA, so something is wrong
	 dec	%o5	! indicate not NA
	stxa	%g0, [%o0]ASI_L2_TAG
	membar #Sync
	! now delay 15 cycles so we don't have a hazard when we return
	mov	16, %o1
1:
	brnz,pt	%o1, 1b
	 dec	%o1
9:
	! UNPARK_SIBLING_CORE is 7 instructions
	UNPARK_SIBLING_CORE(%g1, %o3, %o4)	! 7 instructions
	/*
	 * Restore the DCU.
	 */
	stxa	%g1, [%g0]ASI_DCU
	flush	%g0
	wrpr	%g0, %o2, %pstate		! restore pstate
	retl
	 mov	%o5, %o0
unretire_l2_end:
	SET_SIZE(unretire_l2)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
retire_l3(uint64_t tag_addr, uint64_t pattern)
{return 0;}

#else
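/*
 * This mirrors retire_l2 but operates on the L3 tags via ASI_EC_DIAG.
 * Inferred return values (a reading aid, not from the original sources):
 * 0 retired, 1 already NA, -1 neither invalid nor NA; note that, unlike
 * retire_l2, no second flush attempt is made here.
 */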
	ENTRY(retire_l3)
retire_l3_start:

	! since we disable interrupts, we don't need to do kpreempt_disable()
	rdpr	%pstate, %o2
	andn	%o2, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! disable interrupts
	/*
	 * Save the current DCU state.  Turn off IPS.
	 */
	setx	DCU_IPS_MASK, %g2, %o3
	ldxa	[%g0]ASI_DCU, %g1	! save DCU in %g1
	andn	%g1, %o3, %g4
	stxa	%g4, [%g0]ASI_DCU
	flush	%g0	/* flush required after changing the IC bit */
	PARK_SIBLING_CORE(%g1, %o3, %o4)	! %g1 has DCU value

	! PN_ECACHE_FLUSH_LINE is 30 instructions
	PN_ECACHE_FLUSH_LINE(%o0, %o3, %o4, %o5, %g2)
1:
	clr	%o5	! assume success
	! Check if the line is invalid; if so, NA it.
	ldxa	[%o0]ASI_EC_DIAG, %o3
	btst	0x7, %o3
	bnz	%xcc, 2f
	 nop
	stxa	%o1, [%o0]ASI_EC_DIAG
	membar #Sync	! still on the same cache line
	! now delay 15 cycles so we don't have a hazard when we return
	mov	16, %o1
1:
	brnz,pt	%o1, 1b
	 dec	%o1
9:
	! UNPARK_SIBLING_CORE is 7 instructions, so we cross a cache boundary
	UNPARK_SIBLING_CORE(%g1, %o3, %o4)	! 7 instructions
	/*
	 * Restore the DCU.
	 */
	stxa	%g1, [%g0]ASI_DCU
	flush	%g0
	wrpr	%g0, %o2, %pstate		! restore pstate
	retl
	 mov	%o5, %o0
2:
	! It is OK to have STATE as NA (if so, nothing to do!)
	and	%o3, 0x7, %o3
	cmp	%o3, 0x5
	be,a,pt	%xcc, 9b
	 inc	%o5	! indicate it was already NA
	! Hmm.	Not INV, not NA.
	ba	9b
	 dec	%o5
retire_l3_end:
	SET_SIZE(retire_l3)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
unretire_l3(uint64_t tag_addr)
{return 0;}

#else
	ENTRY(unretire_l3)
unretire_l3_start:

	! since we disable interrupts, we don't need to do kpreempt_disable()
	rdpr	%pstate, %o2
	andn	%o2, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! disable interrupts
	/*
	 * Save the current DCU state.  Turn off IPS.
	 */
	setx	DCU_IPS_MASK, %g2, %o3
	ldxa	[%g0]ASI_DCU, %g1	! save DCU in %g1
	andn	%g1, %o3, %g4
	stxa	%g4, [%g0]ASI_DCU
	flush	%g0	/* flush required after changing the IC bit */
	PARK_SIBLING_CORE(%g1, %o3, %o4)	! %g1 has DCU value

	PN_ECACHE_FLUSH_LINE(%o0, %o3, %o4, %o5, %g2)
1:
	clr	%o5	! assume success
	! Check that the line is in the NA state; if so, INV it.
	ldxa	[%o0]ASI_EC_DIAG, %o3
	and	%o3, 0x7, %o3
	cmp	%o3, 0x5
	bne,a,pt %xcc, 9f	! Wasn't NA, so something is wrong
	 dec	%o5	! indicate not NA
	stxa	%g0, [%o0]ASI_EC_DIAG
	membar #Sync
	! now delay 15 cycles so we don't have a hazard when we return
	mov	16, %o1
1:
	brnz,pt	%o1, 1b
	 dec	%o1
9:
	! UNPARK_SIBLING_CORE is 7 instructions
	UNPARK_SIBLING_CORE(%g1, %o3, %o4)	! 7 instructions
	/*
	 * Restore the DCU.
	 */
	stxa	%g1, [%g0]ASI_DCU
	flush	%g0
	wrpr	%g0, %o2, %pstate		! restore pstate
	retl
	 mov	%o5, %o0
unretire_l3_end:
	SET_SIZE(unretire_l3)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
retire_l2_alternate(uint64_t tag_addr, uint64_t pattern)
{return 0;}

#else
	.align 2048

	ENTRY(retire_l2_alternate)

	! since we disable interrupts, we don't need to do kpreempt_disable()
	rdpr	%pstate, %o2
	andn	%o2, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! disable interrupts
	/*
	 * Save the current DCU state.  Turn off IPS.
	 */
	setx	DCU_IPS_MASK, %g2, %o3
	ldxa	[%g0]ASI_DCU, %g1	! save DCU in %g1
	andn	%g1, %o3, %g4
	stxa	%g4, [%g0]ASI_DCU
	flush	%g0
	PARK_SIBLING_CORE(%g1, %o3, %o4)	! %g1 has DCU value
	clr	%o5	! assume success
8:
	PN_ECACHE_FLUSH_LINE(%o0, %o3, %o4, %g2, %g3)
1:
	! Check if the line is invalid; if so, NA it.
	ldxa	[%o0]ASI_L2_TAG, %o3
	btst	0x7, %o3
	bnz	%xcc, 2f
	 nop
	stxa	%o1, [%o0]ASI_L2_TAG
	membar #Sync	! still on the same cache line
	! now delay 15 cycles so we don't have a hazard when we return
	mov	16, %o1
1:
	brnz,pt	%o1, 1b
	 dec	%o1
9:
	! UNPARK_SIBLING_CORE is 7 instructions, so we cross a cache boundary
	UNPARK_SIBLING_CORE(%g1, %o3, %o4)	! 7 instructions
	/*
	 * Restore the DCU.
	 */
	stxa	%g1, [%g0]ASI_DCU
	flush	%g0
	wrpr	%g0, %o2, %pstate		! restore pstate
	retl
	 mov	%o5, %o0
2:
	! It is OK to have STATE as NA (if so, nothing to do!)
	and	%o3, 0x7, %o3
	cmp	%o3, 0x5
	be,a,pt	%xcc, 9b
	 mov	1, %o5	! indicate it was already NA
	! Hmm.	Not INV, not NA.
	cmp	%o5, 0
	be,a,pt	%xcc, 8b	! Flush the cacheline again
	 mov	2, %o5	! indicate a retry was done
	! We already flushed the cacheline a second time.  Return -1.
	clr	%o5
	ba	9b
	 dec	%o5
	SET_SIZE(retire_l2_alternate)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
unretire_l2_alternate(uint64_t tag_addr)
{return 0;}

#else
	ENTRY(unretire_l2_alternate)

	! since we disable interrupts, we don't need to do kpreempt_disable()
	rdpr	%pstate, %o2
	andn	%o2, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! disable interrupts
	/*
	 * Save the current DCU state.  Turn off IPS.
	 */
	setx	DCU_IPS_MASK, %g2, %o3
	ldxa	[%g0]ASI_DCU, %g1	! save DCU in %g1
	andn	%g1, %o3, %g4
	stxa	%g4, [%g0]ASI_DCU
	flush	%g0	/* flush required after changing the IC bit */
	PARK_SIBLING_CORE(%g1, %o3, %o4)	! %g1 has DCU value

	PN_ECACHE_FLUSH_LINE(%o0, %o3, %o4, %o5, %g2)
1:
	clr	%o5	! assume success
	! Check that the line is in the NA state; if so, INV it.
	ldxa	[%o0]ASI_L2_TAG, %o3
	and	%o3, 0x7, %o3
	cmp	%o3, 0x5
	bne,a,pt %xcc, 9f	! Wasn't NA, so something is wrong
	 dec	%o5	! indicate not NA
	stxa	%g0, [%o0]ASI_L2_TAG
	membar #Sync
	! now delay 15 cycles so we don't have a hazard when we return
	mov	16, %o1
1:
	brnz,pt	%o1, 1b
	 dec	%o1
9:
	! UNPARK_SIBLING_CORE is 7 instructions
	UNPARK_SIBLING_CORE(%g1, %o3, %o4)	! 7 instructions
	/*
	 * Restore the DCU.
	 */
	stxa	%g1, [%g0]ASI_DCU
	flush	%g0
	wrpr	%g0, %o2, %pstate		! restore pstate
	retl
	 mov	%o5, %o0
	SET_SIZE(unretire_l2_alternate)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
retire_l3_alternate(uint64_t tag_addr, uint64_t pattern)
{return 0;}

#else
	ENTRY(retire_l3_alternate)

	! since we disable interrupts, we don't need to do kpreempt_disable()
	rdpr	%pstate, %o2
	andn	%o2, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! disable interrupts
	/*
	 * Save the current DCU state.  Turn off IPS.
	 */
	setx	DCU_IPS_MASK, %g2, %o3
	ldxa	[%g0]ASI_DCU, %g1	! save DCU in %g1
	andn	%g1, %o3, %g4
	stxa	%g4, [%g0]ASI_DCU
	flush	%g0	/* flush required after changing the IC bit */
	PARK_SIBLING_CORE(%g1, %o3, %o4)	! %g1 has DCU value

	! PN_ECACHE_FLUSH_LINE is 30 instructions
	PN_ECACHE_FLUSH_LINE(%o0, %o3, %o4, %o5, %g2)
1:
	clr	%o5	! assume success
	! Check if the line is invalid; if so, NA it.
	ldxa	[%o0]ASI_EC_DIAG, %o3
	btst	0x7, %o3
	bnz	%xcc, 2f
	 nop
	stxa	%o1, [%o0]ASI_EC_DIAG
	membar #Sync	! still on the same cache line
	! now delay 15 cycles so we don't have a hazard when we return
	mov	16, %o1
1:
	brnz,pt	%o1, 1b
	 dec	%o1
9:
	! UNPARK_SIBLING_CORE is 7 instructions, so we cross a cache boundary
	UNPARK_SIBLING_CORE(%g1, %o3, %o4)	! 7 instructions
	/*
	 * Restore the DCU.
	 */
	stxa	%g1, [%g0]ASI_DCU
	flush	%g0
	wrpr	%g0, %o2, %pstate		! restore pstate
	retl
	 mov	%o5, %o0
2:
	! It is OK to have STATE as NA (if so, nothing to do!)
	and	%o3, 0x7, %o3
	cmp	%o3, 0x5
	be,a,pt	%xcc, 9b
	 inc	%o5	! indicate it was already NA
	! Hmm.	Not INV, not NA.
	ba	9b
	 dec	%o5
	SET_SIZE(retire_l3_alternate)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
unretire_l3_alternate(uint64_t tag_addr)
{return 0;}

#else
	ENTRY(unretire_l3_alternate)

	! since we disable interrupts, we don't need to do kpreempt_disable()
	rdpr	%pstate, %o2
	andn	%o2, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! disable interrupts
	/*
	 * Save the current DCU state.  Turn off IPS.
	 */
	setx	DCU_IPS_MASK, %g2, %o3
	ldxa	[%g0]ASI_DCU, %g1	! save DCU in %g1
	andn	%g1, %o3, %g4
	stxa	%g4, [%g0]ASI_DCU
	flush	%g0	/* flush required after changing the IC bit */
	PARK_SIBLING_CORE(%g1, %o3, %o4)	! %g1 has DCU value

	PN_ECACHE_FLUSH_LINE(%o0, %o3, %o4, %o5, %g2)
1:
	clr	%o5	! assume success
	! Check that the line is in the NA state; if so, INV it.
	ldxa	[%o0]ASI_EC_DIAG, %o3
	and	%o3, 0x7, %o3
	cmp	%o3, 0x5
	bne,a,pt %xcc, 9f	! Wasn't NA, so something is wrong
	 dec	%o5	! indicate not NA
	stxa	%g0, [%o0]ASI_EC_DIAG
	membar #Sync
	! now delay 15 cycles so we don't have a hazard when we return
	mov	16, %o1
1:
	brnz,pt	%o1, 1b
	 dec	%o1
9:
	! UNPARK_SIBLING_CORE is 7 instructions
	UNPARK_SIBLING_CORE(%g1, %o3, %o4)	! 7 instructions
	/*
	 * Restore the DCU.
	 */
	stxa	%g1, [%g0]ASI_DCU
	flush	%g0
	wrpr	%g0, %o2, %pstate		! restore pstate
	retl
	 mov	%o5, %o0
	SET_SIZE(unretire_l3_alternate)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
get_ecache_dtags_tl1(uint64_t afar, ch_cpu_logout_t *clop)
{ }

#else
	ENTRY(get_ecache_dtags_tl1)

	PARK_SIBLING_CORE(%g3, %g4, %g5)
	add	%g2, CH_CLO_DATA + CH_CHD_EC_DATA, %g2
	rd	%asi, %g4
	wr	%g0, ASI_N, %asi
	GET_ECACHE_DTAGS(%g1, %g2, %g5, %g6, %g7)
	wr	%g4, %asi
	UNPARK_SIBLING_CORE(%g3, %g4, %g5)	! can use %g3 again

	retry
	SET_SIZE(get_ecache_dtags_tl1)

#endif	/* lint */

#if defined(lint)
/*ARGSUSED*/
void
get_l2_tag_tl1(uint64_t tag_addr, uint64_t tag_data_ptr)
{ }

#else
	ENTRY(get_l2_tag_tl1)

	/*
	 * Now read the tag data
	 */
	ldxa	[%g1]ASI_L2_TAG, %g4		! save tag_data
	stx	%g4, [%g2]

	retry
	SET_SIZE(get_l2_tag_tl1)

#endif	/* lint */

#if defined(lint)
/*ARGSUSED*/
void
get_l3_tag_tl1(uint64_t tag_addr, uint64_t tag_data_ptr)
{ }

#else
	ENTRY(get_l3_tag_tl1)

	/*
	 * Now read the tag data
	 */
	ldxa	[%g1]ASI_EC_DIAG, %g4		! save tag_data
	stx	%g4, [%g2]

	retry
	SET_SIZE(get_l3_tag_tl1)

#endif	/* lint */
