xref: /titanic_51/usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s (revision 15d9d0b528387242011cdcc6190c9e598cfe3a07)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Cheetah+ module
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/asm_linkage.h>
#include <sys/trap.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cheetahasm.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if !defined(lint)

/* BEGIN CSTYLED */

/*
 * Cheetah+ version to reflush an Ecache line by index.
 *
 * By default we assume the Ecache is 2-way so we flush both
 * ways. Even if the cache is direct-mapped no harm will come
 * from performing the flush twice, apart from perhaps a performance
 * penalty.
 *
 * XXX - scr2 not used.
 */
#define	ECACHE_REFLUSH_LINE(ec_set_size, index, scr2)			\
	ldxa	[index]ASI_EC_DIAG, %g0;				\
	ldxa	[index + ec_set_size]ASI_EC_DIAG, %g0;

/*
 * Cheetah+ version of ecache_flush_line.  Uses Cheetah+ Ecache Displacement
 * Flush feature.
 */
#define	ECACHE_FLUSH_LINE(physaddr, ec_set_size, scr1, scr2)		\
	sub	ec_set_size, 1, scr1;					\
	and	physaddr, scr1, scr1;					\
	set	CHP_ECACHE_IDX_DISP_FLUSH, scr2;			\
	or	scr2, scr1, scr1;					\
	ECACHE_REFLUSH_LINE(ec_set_size, scr1, scr2)

/* END CSTYLED */
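/*
 * For illustration only: roughly what ECACHE_FLUSH_LINE above boils down
 * to, written as C.  ecdiag_load() is a hypothetical stand-in for the
 * diagnostic ldxa from ASI_EC_DIAG; the constants are the same ones the
 * macro uses.
 *
 *	static void
 *	chp_ecache_flush_line_sketch(uint64_t physaddr, uint64_t ec_set_size)
 *	{
 *		uint64_t idx = CHP_ECACHE_IDX_DISP_FLUSH |
 *		    (physaddr & (ec_set_size - 1));
 *
 *		(void) ecdiag_load(idx);			// way 0
 *		(void) ecdiag_load(idx + ec_set_size);		// way 1
 *	}
 */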

/*
 * Panther version to reflush a line from both the L2 cache and L3
 * cache by the respective indexes. Flushes all ways of the line from
 * each cache.
 *
 * l2_index	Index into the L2$ of the line to be flushed. This
 *		register will not be modified by this routine.
 * l3_index	Index into the L3$ of the line to be flushed. This
 *		register will not be modified by this routine.
 * scr2		scratch register.
 * scr3		scratch register.
 *
 */
#define	PN_ECACHE_REFLUSH_LINE(l2_index, l3_index, scr2, scr3)		\
	set	PN_L2_MAX_SET, scr2;					\
	set	PN_L2_SET_SIZE, scr3;					\
1:									\
	ldxa	[l2_index + scr2]ASI_L2_TAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	1b;							\
	  sub	scr2, scr3, scr2;					\
	set	PN_L3_MAX_SET, scr2;					\
	set	PN_L3_SET_SIZE, scr3;					\
2:									\
	ldxa	[l3_index + scr2]ASI_EC_DIAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	2b;							\
	  sub	scr2, scr3, scr2;
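/*
 * For illustration only: a C sketch of the way-loops that
 * PN_ECACHE_REFLUSH_LINE implements.  l2_tag_load() and l3_diag_load()
 * are hypothetical stand-ins for the displacement-flush loads through
 * ASI_L2_TAG and ASI_EC_DIAG; the loop bounds are the macro's constants.
 *
 *	static void
 *	pn_ecache_reflush_line_sketch(uint64_t l2_index, uint64_t l3_index)
 *	{
 *		int64_t way_off;
 *
 *		for (way_off = PN_L2_MAX_SET; way_off >= 0;
 *		    way_off -= PN_L2_SET_SIZE)
 *			(void) l2_tag_load(l2_index + way_off);
 *
 *		for (way_off = PN_L3_MAX_SET; way_off >= 0;
 *		    way_off -= PN_L3_SET_SIZE)
 *			(void) l3_diag_load(l3_index + way_off);
 *	}
 */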


/*
 * Panther version of ecache_flush_line. Flushes the line corresponding
 * to physaddr from both the L2 cache and the L3 cache.
 *
 * physaddr	Input: Physical address to flush.
 *              Output: Physical address to flush (preserved).
 * l2_idx_out	Input: scratch register.
 *              Output: Index into the L2$ of the line to be flushed.
 * l3_idx_out	Input: scratch register.
 *              Output: Index into the L3$ of the line to be flushed.
 * scr3		scratch register.
 * scr4		scratch register.
 *
 */
#define	PN_ECACHE_FLUSH_LINE(physaddr, l2_idx_out, l3_idx_out, scr3, scr4)	\
	set	PN_L3_SET_SIZE, l2_idx_out;					\
	sub	l2_idx_out, 1, l2_idx_out;					\
	and	physaddr, l2_idx_out, l3_idx_out;				\
	set	PN_L3_IDX_DISP_FLUSH, l2_idx_out;				\
	or	l2_idx_out, l3_idx_out, l3_idx_out;				\
	set	PN_L2_SET_SIZE, l2_idx_out;					\
	sub	l2_idx_out, 1, l2_idx_out;					\
	and	physaddr, l2_idx_out, l2_idx_out;				\
	set	PN_L2_IDX_DISP_FLUSH, scr3;					\
	or	l2_idx_out, scr3, l2_idx_out;					\
	PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)
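/*
 * For illustration only: in C terms, PN_ECACHE_FLUSH_LINE computes the
 * two displacement-flush indexes and then reflushes every way of both
 * lines.  The sketch below assumes the pn_ecache_reflush_line_sketch()
 * shown with PN_ECACHE_REFLUSH_LINE above; it is not a drop-in
 * replacement for the assembly.
 *
 *	static void
 *	pn_ecache_flush_line_sketch(uint64_t physaddr)
 *	{
 *		uint64_t l2_idx, l3_idx;
 *
 *		l3_idx = PN_L3_IDX_DISP_FLUSH |
 *		    (physaddr & (PN_L3_SET_SIZE - 1));
 *		l2_idx = PN_L2_IDX_DISP_FLUSH |
 *		    (physaddr & (PN_L2_SET_SIZE - 1));
 *
 *		pn_ecache_reflush_line_sketch(l2_idx, l3_idx);
 *	}
 */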

#endif	/* !lint */

/*
 * Fast ECC error at TL>0 handler
 * We get here via trap 70 at TL>0->Software trap 0 at TL>0.  We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 * For a complete description of the Fast ECC at TL>0 handling see the
 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 * us3_common_asm.s
 */
#if defined(lint)

void
fast_ecc_tl1_err(void)
{}

#else	/* lint */

	.section ".text"
	.align	64
	ENTRY_NP(fast_ecc_tl1_err)

	/*
	 * This macro turns off the D$/I$ if they are on and saves their
	 * original state in ch_err_tl1_tmp, saves all the %g registers in the
	 * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
	 * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
	 * point to the ch_err_tl1_data structure and the original D$/I$ state
	 * will be saved in ch_err_tl1_tmp.  All %g registers except for %g1
	 * will be available.
	 */
	CH_ERR_TL1_FECC_ENTER;

	/*
	 * Get the diagnostic logout data.  %g4 must be initialized to
	 * current CEEN state, %g5 must point to logout structure in
	 * ch_err_tl1_data_t.  %g3 will contain the nesting count upon
	 * return.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g4
	and	%g4, EN_REG_CEEN, %g4
	add	%g1, CH_ERR_TL1_LOGOUT, %g5
	DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)

	/*
	 * If the logout nesting count is exceeded, we're probably
	 * not making any progress, so try to panic instead.
	 */
	cmp	%g3, CLO_NESTING_MAX
	bge	fecc_tl1_err
	  nop

	/*
	 * Save the current CEEN and NCEEN state in %g7 and turn them off
	 * before flushing the Ecache.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g7
	andn	%g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
	stxa	%g5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * Flush the Ecache, using the largest possible cache size with the
	 * smallest possible line size since we can't get the actual sizes
	 * from the cpu_node due to DTLB misses.
	 */
	PN_L2_FLUSHALL(%g3, %g4, %g5)

	set	CH_ECACHE_MAX_SIZE, %g4
	set	CH_ECACHE_MIN_LSIZE, %g5

	GET_CPU_IMPL(%g6)
	cmp	%g6, PANTHER_IMPL
	bne	%xcc, 2f
	  nop
	set	PN_L3_SIZE, %g4
2:
	mov	%g6, %g3
	CHP_ECACHE_FLUSHALL(%g4, %g5, %g3)

	/*
	 * Restore CEEN and NCEEN to the previous state.
	 */
	stxa	%g7, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * If we turned off the D$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_DC_ON, %g0
	bz	%xcc, 3f
	  nop

	/*
	 * Flush the D$.
	 */
	ASM_LD(%g4, dcache_size)
	ASM_LD(%g5, dcache_linesize)
	CH_DCACHE_FLUSHALL(%g4, %g5, %g6)

	/*
	 * Turn the D$ back on.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_DC, %g3
	stxa	%g3, [%g0]ASI_DCU
	membar	#Sync
3:
	/*
	 * If we turned off the I$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_IC_ON, %g0
	bz	%xcc, 4f
	  nop

	/*
	 * Flush the I$.  Panther has different I$ parameters, and we
	 * can't access the logout I$ params without possibly generating
	 * an MMU miss.
	 */
	GET_CPU_IMPL(%g6)
	set	PN_ICACHE_SIZE, %g3
	set	CH_ICACHE_SIZE, %g4
	mov	CH_ICACHE_LSIZE, %g5
	cmp	%g6, PANTHER_IMPL
	movz	%xcc, %g3, %g4
	movz	%xcc, PN_ICACHE_LSIZE, %g5
	CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)

	/*
	 * Turn the I$ back on.  Changing DCU_IC requires a flush.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_IC, %g3
	stxa	%g3, [%g0]ASI_DCU
	flush	%g0
4:

#ifdef TRAPTRACE
	/*
	 * Get current trap trace entry physical pointer.
	 */
	CPU_INDEX(%g6, %g5)
	sll	%g6, TRAPTR_SIZE_SHIFT, %g6
	set	trap_trace_ctl, %g5
	add	%g6, %g5, %g6
	ld	[%g6 + TRAPTR_LIMIT], %g5
	tst	%g5
	be	%icc, skip_traptrace
	  nop
	ldx	[%g6 + TRAPTR_PBASE], %g5
	ld	[%g6 + TRAPTR_OFFSET], %g4
	add	%g5, %g4, %g5

	/*
	 * Create trap trace entry.
	 */
	rd	%asi, %g7
	wr	%g0, TRAPTR_ASI, %asi
	rd	STICK, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g4
	stha	%g4, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g4
	stna	%g4, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi
	stna	%g4, [%g5 + TRAP_ENT_F2]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F3]%asi
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi
	wr	%g0, %g7, %asi

	/*
	 * Advance trap trace pointer.
	 */
	ld	[%g6 + TRAPTR_OFFSET], %g5
	ld	[%g6 + TRAPTR_LIMIT], %g4
	st	%g5, [%g6 + TRAPTR_LAST_OFFSET]
	add	%g5, TRAP_ENT_SIZE, %g5
	sub	%g4, TRAP_ENT_SIZE, %g4
	cmp	%g5, %g4
	movge	%icc, 0, %g5
	st	%g5, [%g6 + TRAPTR_OFFSET]
skip_traptrace:
#endif	/* TRAPTRACE */
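	/*
	 * For illustration only: the trap trace pointer advance in the
	 * TRAPTRACE block above is, roughly, the following ring-buffer
	 * update in C (field names here are illustrative, not the actual
	 * trap_trace_ctl layout):
	 *
	 *	uint32_t off = ctl->offset;
	 *	uint32_t lim = ctl->limit;
	 *
	 *	ctl->last_offset = off;
	 *	off += TRAP_ENT_SIZE;
	 *	if (off >= lim - TRAP_ENT_SIZE)
	 *		off = 0;			// wrap around
	 *	ctl->offset = off;
	 */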

	/*
	 * If nesting count is not zero, skip all the AFSR/AFAR
	 * handling and just do the necessary cache-flushing.
	 */
	ldxa	[%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
	brnz	%g2, 6f
	  nop

	/*
	 * If a UCU or L3_UCU followed by a WDU has occurred, go ahead
	 * and panic, since a UE will occur (on the retry) before the
	 * UCU and WDU messages are enqueued.
	 */
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g3
	set	1, %g4
	sllx	%g4, C_AFSR_UCU_SHIFT, %g4
	btst	%g4, %g3		! UCU in original shadow AFSR?
	bnz	%xcc, 5f
	  mov	1, %g4
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g3
	sllx	%g4, C_AFSR_L3_UCU_SHIFT, %g4
	btst	%g4, %g3		! L3_UCU in original shadow AFSR_EXT?
	bz	%xcc, 6f
	  nop
5:
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4	! original AFSR
	ldxa	[%g0]ASI_AFSR, %g3	! current AFSR
	or	%g3, %g4, %g3		! %g3 = original + current AFSR
	set	1, %g4
	sllx	%g4, C_AFSR_WDU_SHIFT, %g4
	btst	%g4, %g3		! WDU in original or current AFSR?
	bnz	%xcc, fecc_tl1_err
	  nop

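	/*
	 * For illustration only: the UCU/WDU test above amounts roughly to
	 * the following C predicate, where sdw_afsr, sdw_afsr_ext and afsr
	 * are the values saved in ch_err_tl1_data and cur_afsr is the live
	 * AFSR (the variable names are illustrative):
	 *
	 *	if (((sdw_afsr & (1ULL << C_AFSR_UCU_SHIFT)) ||
	 *	    (sdw_afsr_ext & (1ULL << C_AFSR_L3_UCU_SHIFT))) &&
	 *	    ((afsr | cur_afsr) & (1ULL << C_AFSR_WDU_SHIFT)))
	 *		goto fecc_tl1_err;		// panic path
	 */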
6:
	/*
	 * We fall into this macro if we've successfully logged the error in
	 * the ch_err_tl1_data structure and want the PIL15 softint to pick
	 * it up and log it.  %g1 must point to the ch_err_tl1_data structure.
	 * Restores the %g registers and issues retry.
	 */
	CH_ERR_TL1_EXIT;

	/*
	 * Establish panic exit label.
	 */
	CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);

	SET_SIZE(fast_ecc_tl1_err)

#endif	/* lint */


#if defined(lint)
/*
 * scrubphys - Pass in the aligned physical memory address
 * that you want to scrub, along with the ecache set size.
 *
 *	1) Displacement flush the E$ line corresponding to %addr.
 *	   The first ldxa guarantees that the %addr is no longer in
 *	   M, O, or E (it goes to I, or to S if an instruction fetch
 *	   also happens).
 *	2) "Write" the data using a CAS %addr,%g0,%g0.
 *	   The casxa guarantees a transition from I to M or S to M.
 *	3) Displacement flush the E$ line corresponding to %addr.
 *	   The second ldxa pushes the M line out of the ecache, into the
 *	   writeback buffers, on the way to memory.
 *	4) The "membar #Sync" pushes the cache line out of the writeback
 *	   buffers onto the bus and finally on to DRAM.
 *
 * This is a modified version of the algorithm suggested by Gary Lauterbach.
 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 * as modified, but then we found out that for Spitfire, if it misses in the
 * E$ it will probably be installed as M, but if it hits in the E$, then it
 * will stay E if the store doesn't happen. So the first displacement flush
 * should ensure that the CAS will miss in the E$.  Arrgh.
 */
/* ARGSUSED */
void
scrubphys(uint64_t paddr, int ecache_set_size)
{}
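/*
 * For illustration only: the four steps described above, written as a C
 * sketch.  phys_cas() and membar_sync() are hypothetical stand-ins for the
 * casxa through ASI_MEM and the membar #Sync; the real routine also runs
 * with PSTATE.IE and PSTATE.AM cleared and picks the Panther or Cheetah+
 * flush macros based on the CPU implementation.
 *
 *	static void
 *	scrubphys_sketch(uint64_t paddr, int ec_set_size)
 *	{
 *		ecache_flush_line(paddr, ec_set_size);	// step 1
 *		(void) phys_cas(paddr, 0, 0);		// step 2
 *		ecache_flush_line(paddr, ec_set_size);	// step 3
 *		membar_sync();				// step 4
 *	}
 */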

#else	/* lint */
	ENTRY(scrubphys)
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	GET_CPU_IMPL(%o5)		! Panther Ecache is flushed differently
	cmp	%o5, PANTHER_IMPL
	bne	scrubphys_1
	  nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	scrubphys_2
	  nop
scrubphys_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
scrubphys_2:
	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value

	retl
	membar	#Sync			! move the data out of the load buffer
	SET_SIZE(scrubphys)

#endif	/* lint */


#if defined(lint)
/*
 * clearphys - Pass in the physical memory address of the checkblock
 * that you want to push out, cleared with a recognizable pattern,
 * from the ecache.
 *
 * To ensure that the ecc gets recalculated after the bad data is cleared,
 * we must write out enough data to fill the w$ line (64 bytes). So we read
 * in an entire ecache subblock's worth of data, and write it back out.
 * Then we overwrite the 16 bytes of bad data with the pattern.
 */
/* ARGSUSED */
void
clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
{
}
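/*
 * For illustration only: a C sketch of the data movement described above.
 * phys_load() and phys_store() are hypothetical stand-ins for ldxa/stxa
 * through ASI_MEM; the real routine also disables interrupts and NCEEN,
 * displacement-flushes the line afterwards and clears the AFSR.
 *
 *	static void
 *	clearphys_sketch(uint64_t paddr)
 *	{
 *		uint64_t base = paddr &
 *		    ~(uint64_t)(CH_ECACHE_SUBBLK_SIZE - 1);
 *		int off;
 *
 *		// rewrite the whole 64-byte subblock so the W$ line fills
 *		// and the ECC is recomputed on the way out
 *		for (off = CH_ECACHE_SUBBLK_SIZE - 8; off >= 0; off -= 8)
 *			phys_store(base + off, phys_load(base + off));
 *
 *		// overwrite the 16-byte checkblock with the pattern
 *		phys_store(paddr, 0xbadecc00badecc01ULL);
 *		phys_store(paddr + 8, 0xbadecc00badecc01ULL);
 *	}
 */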

#else	/* lint */
	ENTRY(clearphys)
	/* turn off IE, AM bits */
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate

	/* turn off NCEEN */
	ldxa	[%g0]ASI_ESTATE_ERR, %o5
	andn	%o5, EN_REG_NCEEN, %o3
	stxa	%o3, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* align the passed-in address to the 64-byte subblock size */
	mov	CH_ECACHE_SUBBLK_SIZE, %o2
	andn	%o0, (CH_ECACHE_SUBBLK_SIZE - 1), %g1

	/* move the good data into the W$ */
clearphys_1:
	subcc	%o2, 8, %o2
	ldxa	[%g1 + %o2]ASI_MEM, %g2
	bge	clearphys_1
	  stxa	%g2, [%g1 + %o2]ASI_MEM

	/* now overwrite the bad data */
	setx	0xbadecc00badecc01, %g1, %g2
	stxa	%g2, [%o0]ASI_MEM
	mov	8, %g1
	stxa	%g2, [%o0 + %g1]ASI_MEM

	GET_CPU_IMPL(%o3)		! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	clearphys_2
	  nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %g1)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	clearphys_3
	  nop
clearphys_2:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
clearphys_3:
	/* clear the AFSR */
	ldxa	[%g0]ASI_AFSR, %o1
	stxa	%o1, [%g0]ASI_AFSR
	membar	#Sync

	/* turn NCEEN back on */
	stxa	%o5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* return and re-enable IE and AM */
	retl
	  wrpr	%g0, %o4, %pstate
	SET_SIZE(clearphys)

#endif	/* lint */


#if defined(lint)
/*
 * Cheetah+ Ecache displacement flush of the specified line from the E$
 *
 * For Panther, this means flushing the specified line from both the
 * L2 cache and L3 cache.
 *
 * Register usage:
 *	%o0 - 64 bit physical address for flushing
 *	%o1 - Ecache set size
 */
/*ARGSUSED*/
void
ecache_flush_line(uint64_t flushaddr, int ec_set_size)
{
}
#else	/* lint */
	ENTRY(ecache_flush_line)

	GET_CPU_IMPL(%o3)		! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	ecache_flush_line_1
	  nop

	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
	b	ecache_flush_line_2
	  nop
ecache_flush_line_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
ecache_flush_line_2:
	retl
	  nop
	SET_SIZE(ecache_flush_line)
#endif	/* lint */

#if defined(lint)
void
set_afsr_ext(uint64_t afsr_ext)
{
	afsr_ext = afsr_ext;
}
#else /* lint */

	ENTRY(set_afsr_ext)
	set	ASI_AFSR_EXT_VA, %o1
	stxa	%o0, [%o1]ASI_AFSR		! afsr_ext reg
	membar	#Sync
	retl
	nop
	SET_SIZE(set_afsr_ext)

#endif /* lint */


#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if an ITLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all ITLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the ITLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
void
itlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(itlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g2			! VA that caused the IMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page size
	set	PN_ITLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5
	and	%g4, %g5, %g4
	or	%g4, %g3, %g3			! 'or' in the trap context and
	mov	1, %g4				! add the IMMU flag to complete
	sllx	%g4, PN_TLO_INFO_IMMU_SHIFT, %g4
	or	%g4, %g3, %g3			! the tlo_info field for logout
	stxa	%g0,[MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Next, we calculate the TLB index value for the failing VA.
	 */
	mov	%g2, %g4			! We need the ITLB index
	set	PN_ITLB_PGSZ_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_ITLB_PGSZ_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_ITLB_T512, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Check to see if the logout structure is available.
	 */
	set	CHPR_TLB_LOGOUT, %g6
	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, itlb_parity_trap_1)
	set	LOGOUT_INVALID_U32, %g6
	sllx	%g6, 32, %g6			! if our logout structure is
	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
	or	%g5, %g6, %g5			! already being used, then we
	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
	cmp	%g6, %g5			! information before clearing
	bne	itlb_parity_trap_1		! and logging the error.
	  nop

	/*
	 * Record the logout information. %g4 contains our index + TLB ID
	 * for use in ASI_ITLB_ACCESS and ASI_ITLB_TAGREAD. %g1 contains
	 * the pointer to our logout struct.
	 */
	stx	%g3, [%g1 + PN_TLO_INFO]
	stx	%g2, [%g1 + PN_TLO_ADDR]
	stx	%g2, [%g1 + PN_TLO_PC]		! %tpc == fault addr for IMMU

	add	%g1, PN_TLO_ITLB_TTE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4
	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	andn	%g4, %g6, %g4			! back to way 0

itlb_parity_trap_1:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Here we will clear the errors from the TLB.
	 */
	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
	stxa	%g0, [%g5]ASI_IMMU		! 0 as it will be invalid.
	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write the data and tag
	membar	#Sync

	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
	or	%g4, %g6, %g4

	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write same data and tag
	membar	#Sync

	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
	flush   %g6				! flush after writing MMU regs

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
	 * already at PIL 15.
	 */
	set	cpu_tlb_parity_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	movl	%icc, PIL_14, %g4
	ba	sys_trap
	  nop
	SET_SIZE(itlb_parity_trap)

#endif	/* lint */

#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if a DTLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all DTLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the DTLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
void
dtlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(dtlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[MMU_SFAR]%asi, %g2		! VA that caused the DMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page sizes
	set	PN_DTLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5		! 'or' in the trap context
	and	%g4, %g5, %g4			! to complete the tlo_info
	or	%g4, %g3, %g3			! field for logout
	stxa	%g0,[MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Calculate the TLB index values for the failing VA. Since the T512
	 * TLBs can be configured for different page sizes, we need to find
	 * the index into each one separately.
	 */
	mov	%g2, %g4			! First we get the DTLB_0 index
	set	PN_DTLB_PGSZ0_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ0_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the DTLB_0 index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_DTLB_T512_0, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID

	mov	%g2, %g7			! Next we get the DTLB_1 index
	set	PN_DTLB_PGSZ1_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ1_SHIFT, %g5
	PN_GET_TLB_INDEX(%g7, %g5)		! %g7 has the DTLB_1 index
	sllx	%g7, PN_TLB_ACC_IDX_SHIFT, %g7	! shift the index into place
	set	PN_DTLB_T512_1, %g5
	or	%g7, %g5, %g7			! and add in the TLB ID

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * If this trap happened at TL>0, then we don't want to mess
	 * with the normal logout struct since that could cause a TLB
	 * miss.
	 */
	rdpr	%tl, %g6			! read current trap level
	cmp	%g6, 1				! skip over the tl>1 code
	ble	dtlb_parity_trap_1		! if TL <= 1.
	  nop

	/*
	 * If we are here, then the trap happened at TL>1. Simply
	 * update our tlo_info field and then skip to the TLB flush
	 * code.
	 */
	mov	1, %g6
	sllx	%g6, PN_TLO_INFO_TL1_SHIFT, %g6
	or	%g6, %g3, %g3
	ba	dtlb_parity_trap_2
	  nop
838
839dtlb_parity_trap_1:
840	/*
841	 * at this point:
842	 *    %g2 - contains the VA whose lookup caused the trap
843	 *    %g3 - contains the tlo_info field
844	 *    %g4 - contains the T512_0 access index value for the
845	 *          VA/PgSz in question
846	 *    %g7 - contains the T512_1 access index value for the
847	 *          VA/PgSz in question
848	 *
849	 * Check to see if the logout structure is available.
850	 */
851	set	CHPR_TLB_LOGOUT, %g6
852	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, dtlb_parity_trap_2)
853	set	LOGOUT_INVALID_U32, %g6
854	sllx	%g6, 32, %g6			! if our logout structure is
855	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
856	or	%g5, %g6, %g5			! already being used, then we
857	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
858	cmp	%g6, %g5			! information before clearing
859	bne	dtlb_parity_trap_2		! and logging the error.
860	  nop
861
862	/*
863	 * Record the logout information. %g4 contains our DTLB_0
864	 * index + TLB ID and %g7 contains our DTLB_1 index + TLB ID
865	 * both of which will be used for ASI_DTLB_ACCESS and
866	 * ASI_DTLB_TAGREAD. %g1 contains the pointer to our logout
867	 * struct.
868	 */
869	stx	%g3, [%g1 + PN_TLO_INFO]
870	stx	%g2, [%g1 + PN_TLO_ADDR]
871	rdpr	%tpc, %g5
872	stx	%g5, [%g1 + PN_TLO_PC]
873
874	add	%g1, PN_TLO_DTLB_TTE, %g1	! move up the pointer
875
876	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
877	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 0 and store it away
878	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
879	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 0 and store it away
880
881	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 0
882	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
883	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
884	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]
885
886	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
887	or	%g4, %g6, %g4			! of each TLB.
888	or	%g7, %g6, %g7
889	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer
890
891	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
892	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 1 and store it away
893	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
894	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 1 and store it away
895
896	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 1
897	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
898	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
899	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]
900
901	andn	%g4, %g6, %g4			! back to way 0
902	andn	%g7, %g6, %g7			! back to way 0
903
904dtlb_parity_trap_2:
905	/*
906	 * at this point:
907	 *    %g2 - contains the VA whose lookup caused the trap
908	 *    %g3 - contains the tlo_info field
909	 *    %g4 - contains the T512_0 access index value for the
910	 *          VA/PgSz in question
911	 *    %g7 - contains the T512_1 access index value for the
912	 *          VA/PgSz in question
913	 *
914	 * Here we will clear the errors from the DTLB.
915	 */
916	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
917	stxa	%g0, [%g5]ASI_DMMU		! 0 as it will be invalid.
918	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write the data and tag.
919	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 0
920	membar	#Sync
921
922	set	PN_TLB_ACC_WAY_BIT, %g6		! same thing again for way 1
923	or	%g4, %g6, %g4
924	or	%g7, %g6, %g7
925
926	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write same data and tag.
927	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 0
928	membar	#Sync
929
930	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
931	flush   %g6				! flush after writing MMU regs
932
933	/*
934	 * at this point:
935	 *    %g2 - contains the VA whose lookup caused the trap
936	 *    %g3 - contains the tlo_info field
937	 *
938	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
939	 * already at PIL 15. We do this even for TL>1 traps since
940	 * those will lead to a system panic.
941	 */
942	set	cpu_tlb_parity_error, %g1
943	rdpr	%pil, %g4
944	cmp	%g4, PIL_14
945	movl	%icc, PIL_14, %g4
946	ba	sys_trap
947	  nop
948	SET_SIZE(dtlb_parity_trap)
949
950#endif	/* lint */


#if defined(lint)
/*
 * Calculates the Panther TLB index based on a virtual address and page size
 *
 * Register usage:
 *	%o0 - virtual address whose index we want
 *	%o1 - Page Size of the TLB in question as encoded in the
 *	      ASI_[D|I]MMU_TAG_ACCESS_EXT register.
 */
uint64_t
pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
{
	return ((va + pg_sz)-(va + pg_sz));
}
#else	/* lint */
	ENTRY(pn_get_tlb_index)

	PN_GET_TLB_INDEX(%o0, %o1)

	retl
	  nop
	SET_SIZE(pn_get_tlb_index)
#endif	/* lint */


#if defined(lint)
/*
 * For Panther CPUs we need to flush the IPB after any I$ or D$
 * parity errors are detected.
 */
void
flush_ipb(void)
{ return; }
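/*
 * For illustration only: a C sketch of the loop in the assembly below.
 * ipb_tag_store() is a hypothetical stand-in for the stxa through
 * ASI_IPB_TAG, and the bounds are the same constants the assembly uses.
 *
 *	static void
 *	flush_ipb_sketch(void)
 *	{
 *		uint64_t va;
 *
 *		for (va = 0; va <= PN_IPB_TAG_ADDR_MAX;
 *		    va += PN_IPB_TAG_ADDR_LINESIZE)
 *			ipb_tag_store(va, 0);	// invalidate one IPB line
 *	}
 */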

#else	/* lint */

	ENTRY(flush_ipb)
	clr	%o0

flush_ipb_1:
	stxa	%g0, [%o0]ASI_IPB_TAG
	membar	#Sync
	cmp	%o0, PN_IPB_TAG_ADDR_MAX
	blt	flush_ipb_1
	  add	%o0, PN_IPB_TAG_ADDR_LINESIZE, %o0

	sethi	%hi(FLUSH_ADDR), %o0
	flush   %o0
	retl
	nop
	SET_SIZE(flush_ipb)

#endif /* lint */
