/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

/*
 * sfmmu related subroutines
 */

#if defined (lint)

/*
 * sfmmu related subroutines
 */

/* ARGSUSED */
void
sfmmu_ctx_steal_tl1(uint64_t sctx, uint64_t rctx)
{}

/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sctx, uint64_t rctx)
{}

/* ARGSUSED */
void
sfmmu_itlb_ld(caddr_t vaddr, int ctxnum, tte_t *tte)
{}

/* ARGSUSED */
void
sfmmu_dtlb_ld(caddr_t vaddr, int ctxnum, tte_t *tte)
{}

int
sfmmu_getctx_pri()
{ return(0); }

int
sfmmu_getctx_sec()
{ return(0); }

/* ARGSUSED */
void
sfmmu_setctx_sec(int ctx)
{}

/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{
}

#else	/* lint */

/*
 * 1. If stealing ctx, flush all TLB entries whose ctx is ctx-being-stolen.
 * 2. If processor is running in the ctx-being-stolen, set the
 *    context to the resv context. That is
 *    If processor in User-mode - pri/sec-ctx both set to ctx-being-stolen,
 *		change both pri/sec-ctx registers to resv ctx.
 *    If processor in Kernel-mode - pri-ctx is 0, sec-ctx is ctx-being-stolen,
 *		just change sec-ctx register to resv ctx. When it returns to
 *		kernel-mode, user_rtt will change pri-ctx.
 *
 * Note: For multiple page size TLB, no need to set page sizes for
 *       DEMAP context.
 *
 * %g1 = ctx being stolen (victim)
 * %g2 = invalid ctx to replace victim with
 */
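/*
 * A rough C-like sketch of the flush step below (illustrative only; the
 * read/write/demap helpers named here do not exist, they stand in for
 * the ldxa/stxa instructions that follow):
 *
 *	saved = read(MMU_PCONTEXT);
 *	write(MMU_PCONTEXT, victim);		// borrow pri-ctx
 *	demap(DEMAP_CTX_TYPE | DEMAP_PRIMARY);	// flush victim's DTLB/ITLB
 *	write(MMU_PCONTEXT, saved);		// restore original pri-ctx
 *	// fall through to sfmmu_raise_tsb_exception
 */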
	ENTRY(sfmmu_ctx_steal_tl1)
	/*
	 * Flush TLBs.
	 */
	set	MMU_PCONTEXT, %g3
	set	DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g4
	ldxa	[%g3]ASI_MMU_CTX, %g5		/* get pri-ctx */
	sethi	%hi(FLUSH_ADDR), %g6
	stxa	%g1, [%g3]ASI_MMU_CTX		/* temporarily set our */
						/*   pri-ctx to victim */
	stxa	%g0, [%g4]ASI_DTLB_DEMAP	/* flush DTLB */
	stxa	%g0, [%g4]ASI_ITLB_DEMAP	/* flush ITLB */
	stxa	%g5, [%g3]ASI_MMU_CTX		/* restore original pri-ctx */
	flush	%g6				/* ensure stxa's committed */
	/* fall through to the code below */

	/*
	 * We enter here if we're just raising a TSB miss
	 * exception, without switching MMU contexts.  In
	 * this case, there is no need to flush the TLB.
	 */
	ALTENTRY(sfmmu_raise_tsb_exception)
	!
	! if (sec-ctx != victim) {
	!	return
	! } else {
	!	if (pri-ctx == victim) {
	!		write INVALID_CONTEXT to sec-ctx
	!		write INVALID_CONTEXT to pri-ctx
	!	} else {
	!		write INVALID_CONTEXT to sec-ctx
	!	}
	! }
	!
	cmp	%g1, NUM_LOCKED_CTXS
	blt,a,pn %icc, ptl1_panic		/* can't steal locked ctx */
	  mov	PTL1_BAD_CTX_STEAL, %g1
	set	CTXREG_CTX_MASK, %g6
	set	MMU_SCONTEXT, %g3
	ldxa	[%g3]ASI_MMU_CTX, %g5		/* get sec-ctx */
	and	%g5, %g6, %g5
	cmp	%g5, %g1			/* is it the victim? */
	bne,pn	%icc, 2f			/* was our sec-ctx a victim? */
	  mov	MMU_PCONTEXT, %g7
	ldxa	[%g7]ASI_MMU_CTX, %g4		/* get pri-ctx */
	and	%g4, %g6, %g4
	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid ctx */
	membar	#Sync
	cmp	%g1, %g4			/* is it the victim? */
	bne 	%icc, 2f			/* nope, no need to change it */
	  nop
	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid ctx */
	/* next instruction is retry so no membar sync */
2:
	retry
	SET_SIZE(sfmmu_ctx_steal_tl1)

	ENTRY_NP(sfmmu_itlb_ld)
	rdpr	%pstate, %o3
#ifdef DEBUG
	andcc	%o3, PSTATE_IE, %g0		! If interrupts already
	bnz,pt %icc, 1f				!   disabled, panic
	  nop

	sethi	%hi(panicstr), %g1
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pt	%icc, 1f
	  nop

	sethi	%hi(sfmmu_panic1), %o0
	call	panic
	 or	%o0, %lo(sfmmu_panic1), %o0
1:
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! Clear page offset
	or	%o0, %o1, %o0
	ldx	[%o2], %g1
	set	MMU_TAG_ACCESS, %o5
#ifdef	CHEETAHPLUS_ERRATUM_34
	!
	! If this is Cheetah or derivative and the specified TTE is locked
	! and hence to be loaded into the T16, fully-associative TLB, we
	! must avoid Cheetah+ erratum 34.  In Cheetah+ erratum 34, under
	! certain conditions an ITLB locked index 0 TTE will erroneously be
	! displaced when a new TTE is loaded via ASI_ITLB_IN.  To avoid
	! this erratum, we scan the T16 top down for an unlocked TTE and
	! explicitly load the specified TTE into that index.
	!
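	!
	! In outline (C-like pseudocode, illustrative only; the helper
	! names here are hypothetical):
	!
	!	if (tte_is_locked) {
	!		for (idx = 15; idx > 0; idx--) {
	!			cur = itlb_access_read(idx);
	!			if (!valid(cur) || !locked(cur)) {
	!				itlb_access_write(idx, tag, tte);
	!				goto done;
	!			}
	!		}
	!		panic(sfmmu_panic5);	/* all T16 entries locked */
	!	}
	!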
	GET_CPU_IMPL(%g2)
	cmp	%g2, CHEETAH_IMPL
	bl,pn	%icc, 0f
	  nop

	andcc	%g1, TTE_LCK_INT, %g0
	bz	%icc, 0f			! Lock bit is not set;
						!   load normally.
	  or	%g0, (15 << 3), %g3		! Start searching from the
						!   top down.

1:
	ldxa	[%g3]ASI_ITLB_ACCESS, %g4	! Load TTE from t16

	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	cmp	%g4, %g0
	bge	%xcc, 2f			! TTE is >= 0 iff not valid
	  andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz	%icc, 2f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3
	brgz	%g3, 1b				! Still more TLB entries
	  nop					! to search

	sethi   %hi(sfmmu_panic5), %o0          ! We searched all entries and
	call    panic                           ! found no unlocked TTE so
	  or    %o0, %lo(sfmmu_panic5), %o0     ! give up.


2:
	!
	! We have found an unlocked or non-valid entry; we'll explicitly load
	! our locked entry here.
	!
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g3]ASI_ITLB_ACCESS
	flush	%o1				! Flush required for I-MMU
	ba	3f				! Delay slot of ba is empty
	nop					!   per Erratum 64

0:
#endif	/* CHEETAHPLUS_ERRATUM_34 */
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g0]ASI_ITLB_IN
	flush	%o1				! Flush required for I-MMU
3:
	retl
	  wrpr	%g0, %o3, %pstate		! Enable interrupts
	SET_SIZE(sfmmu_itlb_ld)

	/*
	 * Load an entry into the DTLB.
	 *
	 * Special handling is required for locked entries since there
	 * are some TLB slots that are reserved for the kernel but not
	 * always held locked.  We want to avoid loading locked TTEs
	 * into those slots since they could be displaced.
	 */
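	/*
	 * In outline (C-like pseudocode, illustrative only; the helper
	 * names are hypothetical):
	 *
	 *	if (tte_is_locked) {
	 *		// scan down from just below the reserved kernel
	 *		// slots in TLB 0 for an invalid or unlocked entry
	 *		for (idx = dtlb_resv_ttenum - 1; idx >= 0; idx--) {
	 *			cur = dtlb_access_read(idx);
	 *			if (!valid(cur) || !locked(cur)) {
	 *				dtlb_access_write(idx, tagacc, tte);
	 *				return;
	 *			}
	 *		}
	 *		panic(sfmmu_panic5);
	 *	} else {
	 *		dtlb_in(tagacc, tte);	// MMU picks the slot
	 *	}
	 */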
	ENTRY_NP(sfmmu_dtlb_ld)
	rdpr	%pstate, %o3
#ifdef DEBUG
	andcc	%o3, PSTATE_IE, %g0		! if interrupts already
	bnz,pt	%icc, 1f			! disabled, panic
	  nop

	sethi	%hi(panicstr), %g1
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pt	%icc, 1f
	  nop

	sethi	%hi(sfmmu_panic1), %o0
	call	panic
	 or	%o0, %lo(sfmmu_panic1), %o0
1:
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! clear page offset
	or	%o0, %o1, %o0			! or in ctx to form tagacc
	ldx	[%o2], %g1
	sethi	%hi(ctx_pgsz_array), %o2	! Check for T8s
	ldn	[%o2 + %lo(ctx_pgsz_array)], %o2
	brz	%o2, 1f
	set	MMU_TAG_ACCESS, %o5
	ldub	[%o2 + %o1], %o2		! Cheetah+: set up tag access
	sll	%o2, TAGACCEXT_SHIFT, %o2	! extension register so entry
	set	MMU_TAG_ACCESS_EXT, %o4		! can go into T8 if unlocked
	stxa	%o2,[%o4]ASI_DMMU
	membar	#Sync
1:
	andcc	%g1, TTE_LCK_INT, %g0		! Locked entries require
	bnz,pn	%icc, 2f			! special handling
	  sethi	%hi(dtlb_resv_ttenum), %g3
	stxa	%o0,[%o5]ASI_DMMU		! Load unlocked TTE
	stxa	%g1,[%g0]ASI_DTLB_IN		! via DTLB_IN
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
2:
	ld	[%g3 + %lo(dtlb_resv_ttenum)], %g3
	sll	%g3, 3, %g3			! First reserved idx in TLB 0
	sub	%g3, (1 << 3), %g3		! Decrement idx
3:
	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	brgez,pn %g4, 4f			! TTE is >= 0 iff not valid
	  nop
	andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz,pn	%icc, 4f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3		! Decrement idx
	brgez	%g3, 3b
	  nop
	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
	call	panic				! found no unlocked TTE so
	  or	%o0, %lo(sfmmu_panic5), %o0	! give up.
4:
	stxa	%o0,[%o5]ASI_DMMU		! Setup tag access
	stxa	%g1,[%g3]ASI_DTLB_ACCESS	! Displace entry at idx
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
	SET_SIZE(sfmmu_dtlb_ld)

	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	  ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	set	CTXREG_CTX_MASK, %o1
	ldxa	[%o0]ASI_MMU_CTX, %o0
	retl
	and	%o0, %o1, %o0
	SET_SIZE(sfmmu_getctx_sec)

	/*
	 * Set the secondary context register for this process.
	 * %o0 = context number for this process.
	 */
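	/*
	 * A minimal sketch of the value written below, in C-like pseudocode
	 * (illustrative; only ctx_pgsz_array and CTXREG_EXT_SHIFT are real
	 * names used by the code):
	 *
	 *	reg = ctx;
	 *	if (ctx_pgsz_array != NULL)
	 *		reg |= (uint64_t)ctx_pgsz_array[ctx] << CTXREG_EXT_SHIFT;
	 *	write(MMU_SCONTEXT, reg);	// followed by a flush
	 */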
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first. Also, resume saves state in %o3 and %o5
	 * so we can't use those registers here.
	 */

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */
1:
	mov	MMU_SCONTEXT, %o1
	sethi	%hi(ctx_pgsz_array), %g2
	ldn	[%g2 + %lo(ctx_pgsz_array)], %g2
	brz	%g2, 2f
	nop
	ldub	[%g2 + %o0], %g2
	sll	%g2, CTXREG_EXT_SHIFT, %g2
	or	%g2, %o0, %o0
2:
	sethi	%hi(FLUSH_ADDR), %o4
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	flush	%o4

	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	wrpr	%g0, %g1, %pstate		/* enable interrupts */
1:	retl
	nop
	SET_SIZE(sfmmu_setctx_sec)

	/*
	 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
	 * returns the detection value in %o0.
	 */
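	/*
	 * Roughly equivalent C pseudocode (illustrative only):
	 *
	 *	if (cpu_impl >= CHEETAH_PLUS_IMPL) {
	 *		ktsb_phys = 1;
	 *		return (1);
	 *	}
	 *	return (0);
	 */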
	ENTRY_NP(sfmmu_setup_4lp)
	GET_CPU_IMPL(%o0);
	cmp	%o0, CHEETAH_PLUS_IMPL
	blt,a,pt %icc, 4f
	  clr	%o1
	set	ktsb_phys, %o2
	mov	1, %o1
	st	%o1, [%o2]
4:	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)


	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 */
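	/*
	 * High-level outline in C-like pseudocode (illustrative; none of
	 * these helpers exist as named, see the macros used below):
	 *
	 *	if (sfmmup == ksfmmup)
	 *		return;			// kernel hat: nothing to do
	 *	tsbreg = encode 1st (and, if present, 2nd) TSB;
	 *	write TSB base register;
	 *	load a locked DTLB TTE for each TSB at its reserved index;
	 *	tsbmiss = this CPU's tsbmiss area;
	 *	tsbmiss->ismblkpa = sfmmup->sfmmu_ismblkpa;
	 *	tsbmiss->uhatid = sfmmup;
	 *	tsbmiss->hatflags = sfmmup->sfmmu_flags;
	 */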
	ENTRY_NP(sfmmu_load_mmustate)
	/*
	 * From resume we call sfmmu_load_mmustate with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first. Also, resume saves state in %o5 and we
	 * can't use this register here.
	 */

	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o0
	be,pn	%xcc, 3f			! if kernel as, do nothing
	  nop

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	wrpr	%g1, PSTATE_IE, %pstate		! disable interrupts
1:
	/*
	 * We need to set up the TSB base register, tsbmiss
	 * area, and load locked TTE(s) for the TSB.
	 */
	ldx	[%o0 + SFMMU_TSB], %o1		! %o1 = first tsbinfo
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second tsbinfo
	brz,pt	%g2, 4f
	  nop
	/*
	 * We have a second TSB for this process, so we need to
	 * encode data for both the first and second TSB in our single
	 * TSB base register.  See hat_sfmmu.h for details on what bits
	 * correspond to which TSB.
	 * We also need to load a locked TTE into the TLB for the second TSB
	 * in this case.
	 */
	MAKE_TSBREG_SECTSB(%o2, %o1, %g2, %o3, %o4, %g3, sfmmu_tsb_2nd)
	! %o2 = tsbreg
	sethi	%hi(utsb4m_dtlb_ttenum), %o3
	sethi	%hi(utsb4m_vabase), %o4
	ld	[%o3 + %lo(utsb4m_dtlb_ttenum)], %o3
	ldx	[%o4 + %lo(utsb4m_vabase)], %o4	! %o4 = TLB tag for sec TSB
	sll	%o3, DTACC_SHIFT, %o3		! %o3 = sec TSB TLB index
	RESV_OFFSET(%g2, %o4, %g3, sfmmu_tsb_2nd)	! or-in bits of TSB VA
	LOAD_TSBTTE(%g2, %o3, %o4, %g3)		! load sec TSB locked TTE
	sethi	%hi(utsb_vabase), %g3
	ldx	[%g3 + %lo(utsb_vabase)], %g3	! %g3 = TLB tag for first TSB
	ba,pt	%xcc, 5f
	  nop

4:	sethi	%hi(utsb_vabase), %g3
	ldx	[%g3 + %lo(utsb_vabase)], %g3	! %g3 = TLB tag for first TSB
	MAKE_TSBREG(%o2, %o1, %g3, %o3, %o4, sfmmu_tsb_1st)	! %o2 = tsbreg

5:	LOAD_TSBREG(%o2, %o3, %o4)		! write TSB base register

	/*
	 * Load the TTE for the first TSB at the appropriate location in
	 * the TLB
	 */
	sethi	%hi(utsb_dtlb_ttenum), %o2
	ld	[%o2 + %lo(utsb_dtlb_ttenum)], %o2
	sll	%o2, DTACC_SHIFT, %o2		! %o2 = first TSB TLB index
	RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st)	! or-in bits of TSB VA
	LOAD_TSBTTE(%o1, %o2, %g3, %o4)		! load first TSB locked TTE

6:	ldx	[%o0 + SFMMU_ISMBLKPA], %o1	! copy members of sfmmu
	CPU_TSBMISS_AREA(%o2, %o3)		! we need to access from
	stx	%o1, [%o2 + TSBMISS_ISMBLKPA]	! sfmmu_tsb_miss into the
	lduh	[%o0 + SFMMU_FLAGS], %o3	! per-CPU tsbmiss area.
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stuh	%o3, [%o2 + TSBMISS_HATFLAGS]

	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 3f
	wrpr	%g0, %g1, %pstate		! enable interrupts
3:	retl
	nop
	SET_SIZE(sfmmu_load_mmustate)

#endif /* lint */

#if defined (lint)
/*
 * Invalidate all of the entries within the tsb, by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact TSBs are page aligned and a multiple of
 * PAGESIZE to use block stores.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */
/* ARGSUSED */
void
sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
{}
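/*
 * For reference, a straightforward (non-block-store) C sketch of the same
 * invalidation; purely illustrative and hypothetical in name, treating each
 * 16-byte tsbe as an 8-byte tag followed by 8 bytes of TTE data:
 *
 *	void
 *	sfmmu_inv_tsb_slow(caddr_t tsb_base, uint_t tsb_bytes)
 *	{
 *		uint64_t *p = (uint64_t *)tsb_base;
 *		uint64_t *end = (uint64_t *)(tsb_base + tsb_bytes);
 *
 *		while (p < end) {
 *			*p++ = (uint64_t)-1;	// tag: all ones => invalid
 *			*p++ = 0;		// TTE data
 *		}
 *	}
 */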

#else /* lint */

#define	VIS_BLOCKSIZE	64

	ENTRY(sfmmu_inv_tsb_fast)

	! Get space for aligned block of saved fp regs.
	save	%sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp

	! kpreempt_disable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	inc	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]

	! See if fpu was in use.  If it was, we need to save off the
	! floating point registers to the stack.
	rd	%fprs, %l0			! %l0 = cached copy of fprs
	btst	FPRS_FEF, %l0
	bz,pt	%icc, 4f
	  nop

	! save in-use fpregs on stack
	membar	#Sync				! make sure tranx to fp regs
						! have completed
	add	%fp, STACK_BIAS - 65, %l1	! get stack frame for fp regs
	and	%l1, -VIS_BLOCKSIZE, %l1	! block align frame
	stda	%d0, [%l1]ASI_BLK_P		! %l1 = addr of saved fp regs

	! enable fp
4:	membar	#StoreStore|#StoreLoad|#LoadStore
	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_P, %asi

	! load up FP registers with invalid TSB tag.
	fone	%d0			! ones in tag
	fzero	%d2			! zeros in TTE
	fone	%d4			! ones in tag
	fzero	%d6			! zeros in TTE
	fone	%d8			! ones in tag
	fzero	%d10			! zeros in TTE
	fone	%d12			! ones in tag
	fzero	%d14			! zeros in TTE
	ba,pt	%xcc, .sfmmu_inv_doblock
	  mov	(4*VIS_BLOCKSIZE), %i4	! we do 4 stda's each loop below

.sfmmu_inv_blkstart:
      ! stda	%d0, [%i0+192]%asi  ! in dly slot of branch that got us here
	stda	%d0, [%i0+128]%asi
	stda	%d0, [%i0+64]%asi
	stda	%d0, [%i0]%asi

	add	%i0, %i4, %i0
	sub	%i1, %i4, %i1

.sfmmu_inv_doblock:
	cmp	%i1, (4*VIS_BLOCKSIZE)	! check for completion
	bgeu,a	%icc, .sfmmu_inv_blkstart
	  stda	%d0, [%i0+192]%asi

.sfmmu_inv_finish:
	membar	#Sync
	btst	FPRS_FEF, %l0		! saved from above
	bz,a	.sfmmu_inv_finished
	  wr	%l0, 0, %fprs		! restore fprs

	! restore fpregs from stack
	ldda    [%l1]ASI_BLK_P, %d0
	membar	#Sync
	wr	%l0, 0, %fprs		! restore fprs

.sfmmu_inv_finished:
	! kpreempt_enable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	dec	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]
	ret
	restore
	SET_SIZE(sfmmu_inv_tsb_fast)

#endif /* lint */

#if defined(lint)

/*
 * Prefetch "struct tsbe" while walking TSBs.
 * Prefetch 7 cache lines ahead of where we are now.
 * #n_reads is used since #one_read only applies to floating point
 * reads, and we are not doing floating point reads.  However, this
 * has the negative side effect of polluting the ecache.
 * The 448 comes from (7 * 64), which is how far ahead of our current
 * address we want to prefetch.
 */
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}
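/*
 * Roughly equivalent C using a compiler builtin (illustrative only; the
 * kernel uses the assembly versions below, not __builtin_prefetch):
 *
 *	void
 *	prefetch_tsbe_read(struct tsbe *tsbep)
 *	{
 *		// 7 cache lines * 64 bytes = 448 bytes ahead, read hint
 *		__builtin_prefetch((char *)tsbep + 448, 0);
 *	}
 */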

/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}

#else /* lint */

	ENTRY(prefetch_tsbe_read)
	retl
	prefetch	[%o0+448], #n_reads
	SET_SIZE(prefetch_tsbe_read)

	ENTRY(prefetch_tsbe_write)
	retl
	prefetch	[%o0], #n_writes
	SET_SIZE(prefetch_tsbe_write)
#endif /* lint */


#ifndef lint
#endif	/* lint */
