/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

/*
 * sfmmu related subroutines
 */

#if defined (lint)

/*
 * sfmmu related subroutines
 */
/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
{}

/* ARGSUSED */
void
sfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
{}

/* ARGSUSED */
void
sfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
{}

int
sfmmu_getctx_pri()
{ return(0); }

int
sfmmu_getctx_sec()
{ return(0); }

/* ARGSUSED */
void
sfmmu_setctx_sec(uint_t ctx)
{}

/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{
}

#else	/* lint */

/*
 * Invalidate either the context of a specific victim or any process
 * currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being invalidated
 *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
 * Note %g1 is the only input argument used by this xcall handler.
 */
	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT) {
	!	if (sec-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to sec-ctx
	!	}
	!	if (pri-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to pri-ctx
	!	}
	! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
	!	return
	! } else {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	! }
	!

	sethi   %hi(ksfmmup), %g3
	ldx     [%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn %xcc, ptl1_panic	/* can't invalidate kernel ctx */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2

	cmp	%g1, INVALID_CONTEXT
	bne,pt	%xcc, 1f			/* called from wrap_around? */
	  mov	MMU_SCONTEXT, %g3

	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = pgsz | sec-ctx */
	set     CTXREG_CTX_MASK, %g4
	and	%g5, %g4, %g5			/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 0f			/* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g5		/* get pgsz | pri-ctx */
	and     %g5, %g4, %g5			/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 2f			/* yes, no need to change */
	  nop

	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	retry

1:
	/* %g3 = MMU_SCONTEXT	*/
	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5     /* load usfmmup */

	cmp	%g5, %g1		/* is the victim hat running here? */
	bne,pt	%xcc, 2f
	  nop

	ldxa    [%g3]ASI_MMU_CTX, %g5           /* %g5 = pgsz | sec-ctx */
	set     CTXREG_CTX_MASK, %g4
	and     %g5, %g4, %g5			/* %g5 = sec-ctx */
	cmp     %g5, INVALID_CONTEXT            /* kernel or invalid ctx? */
	ble,pn  %xcc, 0f                        /* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g4		/* %g4 = pgsz | pri-ctx */
	set     CTXREG_CTX_MASK, %g6
	and	%g4, %g6, %g4			/* %g4 = pri-ctx */
	cmp	%g4, INVALID_CONTEXT		/* is pri-ctx the victim? */
	ble 	%icc, 2f			/* no, no need to change it */
	  nop
	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	/* next instruction is retry so no membar sync */
2:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)

	/*
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
	ENTRY_NP(sfmmu_itlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l1, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! Clear page offset

	ldx	[%o1], %g1
	set	MMU_TAG_ACCESS, %o5
#ifdef	CHEETAHPLUS_ERRATUM_34
	!
	! If this is Cheetah or derivative and the specified TTE is locked
	! and hence to be loaded into the T16, fully-associative TLB, we
	! must avoid Cheetah+ erratum 34.  In Cheetah+ erratum 34, under
	! certain conditions an ITLB locked index 0 TTE will erroneously be
	! displaced when a new TTE is loaded via ASI_ITLB_IN.  To avoid
	! this erratum, we scan the T16 top down for an unlocked TTE and
	! explicitly load the specified TTE into that index.
	!
	GET_CPU_IMPL(%g2)
	cmp	%g2, CHEETAH_IMPL
	bl,pn	%icc, 0f
	  nop

	andcc	%g1, TTE_LCK_INT, %g0
	bz	%icc, 0f			! Lock bit is not set;
						!   load normally.
	  or	%g0, (15 << 3), %g3		! Start searching from the
						!   top down.

1:
	ldxa	[%g3]ASI_ITLB_ACCESS, %g4	! Load TTE from t16

	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	cmp	%g4, %g0
	bge	%xcc, 2f			! TTE is > 0 iff not valid
	  andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz	%icc, 2f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3
	brgz	%g3, 1b				! Still more TLB entries
	  nop					! to search

	sethi   %hi(sfmmu_panic5), %o0          ! We searched all entries and
	call    panic                           ! found no unlocked TTE so
	  or    %o0, %lo(sfmmu_panic5), %o0     ! give up.


2:
	!
	! We have found an unlocked or non-valid entry; we'll explicitly load
	! our locked entry here.
	!
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g3]ASI_ITLB_ACCESS
	flush	%o1				! Flush required for I-MMU
	ba	3f				! Delay slot of ba is empty
	  nop					!   per Erratum 64

0:
#endif	/* CHEETAHPLUS_ERRATUM_34 */
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g0]ASI_ITLB_IN
	flush	%o1				! Flush required for I-MMU
3:
	retl
	  wrpr	%g0, %o3, %pstate		! Enable interrupts
	SET_SIZE(sfmmu_itlb_ld_kva)
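
	/*
	 * For reference, a rough C sketch of the erratum 34 search above
	 * (illustrative only; read_itlb_entry() and write_itlb_entry() are
	 * hypothetical stand-ins for the ASI_ITLB_ACCESS ldxa/stxa pairs,
	 * not kernel interfaces):
	 *
	 *	int idx;
	 *	for (idx = 15; idx >= 1; idx--) {
	 *		int64_t data = read_itlb_entry(idx);
	 *		// not valid (sign bit clear) or unlocked: displace it
	 *		if (data >= 0 || !(data & TTE_LCK_INT))
	 *			break;
	 *	}
	 *	if (idx == 0)
	 *		panic(sfmmu_panic5);	// every candidate was locked
	 *	write_itlb_entry(idx, tag_access, tte);
	 */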

	/*
	 * Load an entry into the DTLB.
	 *
	 * Special handling is required for locked entries since there
	 * are some TLB slots that are reserved for the kernel but not
	 * always held locked.  We want to avoid loading locked TTEs
	 * into those slots since they could be displaced.
	 *
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
	ENTRY_NP(sfmmu_dtlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l2, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! clear page offset

	ldx	[%o1], %g1

	set	MMU_TAG_ACCESS, %o5

	set	cpu_impl_dual_pgsz, %o2
	ld	[%o2], %o2
	brz	%o2, 1f
	  nop

	sethi	%hi(ksfmmup), %o2
	ldx	[%o2 + %lo(ksfmmup)], %o2
	ldub    [%o2 + SFMMU_CEXT], %o2
        sll     %o2, TAGACCEXT_SHIFT, %o2

	set	MMU_TAG_ACCESS_EXT, %o4		! can go into T8 if unlocked
	stxa	%o2,[%o4]ASI_DMMU
	membar	#Sync
1:
	andcc	%g1, TTE_LCK_INT, %g0		! Locked entries require
	bnz,pn	%icc, 2f			! special handling
	  sethi	%hi(dtlb_resv_ttenum), %g3
	stxa	%o0,[%o5]ASI_DMMU		! Load unlocked TTE
	stxa	%g1,[%g0]ASI_DTLB_IN		! via DTLB_IN
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
2:
	ld	[%g3 + %lo(dtlb_resv_ttenum)], %g3
	sll	%g3, 3, %g3			! First reserved idx in TLB 0
	sub	%g3, (1 << 3), %g3		! Decrement idx
3:
	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	brgez,pn %g4, 4f			! TTE is > 0 iff not valid
	  nop
	andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz,pn	%icc, 4f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3		! Decrement idx
	brgez	%g3, 3b
	  nop
	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
	call	panic				! found no unlocked TTE so
	  or	%o0, %lo(sfmmu_panic5), %o0	! give up.
4:
	stxa	%o0,[%o5]ASI_DMMU		! Setup tag access
#ifdef	OLYMPUS_SHARED_FTLB
	stxa	%g1,[%g0]ASI_DTLB_IN
#else
	stxa	%g1,[%g3]ASI_DTLB_ACCESS	! Displace entry at idx
#endif
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
	SET_SIZE(sfmmu_dtlb_ld_kva)
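
	/*
	 * A rough C sketch of the locked-entry search above (illustrative
	 * only; read_dtlb_entry() and write_dtlb_entry() are hypothetical
	 * stand-ins for the ASI_DTLB_ACCESS ldxa/stxa pairs):
	 *
	 *	int idx;
	 *	for (idx = dtlb_resv_ttenum - 1; idx >= 0; idx--) {
	 *		int64_t data = read_dtlb_entry(idx);
	 *		// not valid (sign bit clear) or unlocked: displace it
	 *		if (data >= 0 || !(data & TTE_LCK_INT))
	 *			break;
	 *	}
	 *	if (idx < 0)
	 *		panic(sfmmu_panic5);	// all reserved slots locked
	 *	write_dtlb_entry(idx, tag_access, tte);
	 */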

	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	  ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	set	CTXREG_CTX_MASK, %o1
	ldxa	[%o0]ASI_MMU_CTX, %o0
	retl
	  and	%o0, %o1, %o0
	SET_SIZE(sfmmu_getctx_sec)

	/*
	 * Set the secondary context register for this process.
	 * %o0 = page_size | context number for this process.
	 */
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first.
	 */

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	  wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */

1:
	mov	MMU_SCONTEXT, %o1

	sethi	%hi(FLUSH_ADDR), %o4
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	flush	%o4

	/*
	 * If the routine was entered with intr enabled, then enable intr now.
	 * Otherwise, keep intr disabled and return without enabling intr.
	 * %g1 - old intr state
	 */
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 2f
	  wrpr	%g0, %g1, %pstate		/* enable interrupts */
2:	retl
	  nop
	SET_SIZE(sfmmu_setctx_sec)
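
	/*
	 * The interrupt handling above follows a save/disable/restore
	 * pattern; roughly, in C (illustrative only -- pstate_read(),
	 * pstate_write() and write_sec_ctx() are hypothetical helpers):
	 *
	 *	uint64_t pstate = pstate_read();
	 *	if (pstate & PSTATE_IE)
	 *		pstate_write(pstate & ~PSTATE_IE);	// disable intrs
	 *	write_sec_ctx(ctx);				// stxa + flush
	 *	if (pstate & PSTATE_IE)
	 *		pstate_write(pstate);			// restore
	 */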

	/*
	 * Set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS,
	 * and return the detection value in %o0.
	 *
	 * Currently ASI_QUAD_LDD_PHYS is supported in the following processors:
	 *  - cheetah+ and later (greater than or equal to CHEETAH_PLUS_IMPL)
	 *  - FJ OPL Olympus-C and later (less than SPITFIRE_IMPL)
	 */
	ENTRY_NP(sfmmu_setup_4lp)
	GET_CPU_IMPL(%o0);
	cmp	%o0, CHEETAH_PLUS_IMPL
	bge,pt	%icc, 4f
	  mov	1, %o1
	cmp	%o0, SPITFIRE_IMPL
	bge,a,pn %icc, 3f
	  clr	%o1
4:
	set	ktsb_phys, %o2
	st	%o1, [%o2]
3:	retl
	  mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)
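
	/*
	 * Equivalent logic, roughly, in C (illustrative only;
	 * cpu_implementation() is a hypothetical stand-in for GET_CPU_IMPL):
	 *
	 *	int impl = cpu_implementation();
	 *	int supported = (impl >= CHEETAH_PLUS_IMPL ||
	 *	    impl < SPITFIRE_IMPL);	// US-III+ and later, or OPL
	 *	if (supported)
	 *		ktsb_phys = 1;		// left untouched otherwise
	 *	return (supported);
	 */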


	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 */
	ENTRY_NP(sfmmu_load_mmustate)

#ifdef DEBUG
	PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l3, %g1)
#endif /* DEBUG */

	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o0
	be,pn	%xcc, 3f			! if kernel as, do nothing
	  nop

	/*
	 * We need to set up the TSB base register, tsbmiss
	 * area, and load locked TTE(s) for the TSB.
	 */
	ldx	[%o0 + SFMMU_TSB], %o1		! %o1 = first tsbinfo
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second tsbinfo

#ifdef UTSB_PHYS
	/*
	 * UTSB_PHYS accesses user TSBs via physical addresses.  The first
	 * TSB is in the MMU I/D TSB Base registers.  The second TSB uses a
	 * designated ASI_SCRATCHPAD register as a pseudo TSB base register.
	 */
	MAKE_UTSBREG_PHYS(%o1, %o2, %o3)	! %o2 = first utsbreg
	LOAD_TSBREG(%o2, %o3, %o4)		! write TSB base register

	brz,a,pt %g2, 2f
	  mov   -1, %o2				! use -1 if no second TSB

	MAKE_UTSBREG_PHYS(%g2, %o2, %o3)	! %o2 = second utsbreg
2:
	LOAD_2ND_TSBREG(%o2, %o3)		! write 2nd pseudo TSB base register
#else /* UTSB_PHYS */
	brz,pt  %g2, 4f
	  nop
	/*
	 * We have a second TSB for this process, so we need to
	 * encode data for both the first and second TSB in our single
	 * TSB base register.  See hat_sfmmu.h for details on what bits
	 * correspond to which TSB.
	 * We also need to load a locked TTE into the TLB for the second TSB
	 * in this case.
	 */
	MAKE_TSBREG_SECTSB(%o2, %o1, %g2, %o3, %o4, %g3, sfmmu_tsb_2nd)
	! %o2 = tsbreg
	sethi	%hi(utsb4m_dtlb_ttenum), %o3
	sethi	%hi(utsb4m_vabase), %o4
	ld	[%o3 + %lo(utsb4m_dtlb_ttenum)], %o3
	ldx	[%o4 + %lo(utsb4m_vabase)], %o4	! %o4 = TLB tag for sec TSB
	sll	%o3, DTACC_SHIFT, %o3		! %o3 = sec TSB TLB index
	RESV_OFFSET(%g2, %o4, %g3, sfmmu_tsb_2nd)	! or-in bits of TSB VA
	LOAD_TSBTTE(%g2, %o3, %o4, %g3)		! load sec TSB locked TTE
	sethi	%hi(utsb_vabase), %g3
	ldx	[%g3 + %lo(utsb_vabase)], %g3	! %g3 = TLB tag for first TSB
	ba,pt	%xcc, 5f
	  nop

4:	sethi	%hi(utsb_vabase), %g3
	ldx	[%g3 + %lo(utsb_vabase)], %g3	! %g3 = TLB tag for first TSB
	MAKE_TSBREG(%o2, %o1, %g3, %o3, %o4, sfmmu_tsb_1st)	! %o2 = tsbreg

5:	LOAD_TSBREG(%o2, %o3, %o4)		! write TSB base register

	/*
	 * Load the TTE for the first TSB at the appropriate location in
	 * the TLB.
	 */
	sethi	%hi(utsb_dtlb_ttenum), %o2
	ld	[%o2 + %lo(utsb_dtlb_ttenum)], %o2
	sll	%o2, DTACC_SHIFT, %o2		! %o2 = first TSB TLB index
	RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st)	! or-in bits of TSB VA
	LOAD_TSBTTE(%o1, %o2, %g3, %o4)		! load first TSB locked TTE
#endif /* UTSB_PHYS */

6:	ldx	[%o0 + SFMMU_ISMBLKPA], %o1	! copy members of sfmmu
	CPU_TSBMISS_AREA(%o2, %o3)		! we need to access from
	stx	%o1, [%o2 + TSBMISS_ISMBLKPA]	! sfmmu_tsb_miss into the
	ldub	[%o0 + SFMMU_TTEFLAGS], %o3	! per-CPU tsbmiss area.
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stub	%o3, [%o2 + TSBMISS_UTTEFLAGS]

3:	retl
	  nop
	SET_SIZE(sfmmu_load_mmustate)
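
	/*
	 * In outline, the routine above does roughly the following
	 * (illustrative C sketch only; the assignments below use the assym
	 * offset names from above rather than real structure members):
	 *
	 *	if (sfmmup == ksfmmup)
	 *		return;			// kernel hat: nothing to do
	 *	// program the TSB base register(s) for the 1st (and 2nd) TSB
	 *	// and, in the non-UTSB_PHYS case, load locked TSB TTEs
	 *	tsbmiss = CPU_TSBMISS_AREA();
	 *	tsbmiss[TSBMISS_ISMBLKPA]  = sfmmup[SFMMU_ISMBLKPA];
	 *	tsbmiss[TSBMISS_UHATID]    = sfmmup;
	 *	tsbmiss[TSBMISS_UTTEFLAGS] = sfmmup[SFMMU_TTEFLAGS];
	 */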

#endif /* lint */

#if defined (lint)
/*
 * Invalidate all of the entries within the tsb by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact TSBs are page aligned and a multiple of
 * PAGESIZE to use block stores.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */
/* ARGSUSED */
void
sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
{}

#else /* lint */

#define	VIS_BLOCKSIZE	64

	ENTRY(sfmmu_inv_tsb_fast)

	! Get space for aligned block of saved fp regs.
	save	%sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp

	! kpreempt_disable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	inc	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]

	! See if fpu was in use.  If it was, we need to save off the
	! floating point registers to the stack.
	rd	%fprs, %l0			! %l0 = cached copy of fprs
	btst	FPRS_FEF, %l0
	bz,pt	%icc, 4f
	  nop

	! save in-use fpregs on stack
	membar	#Sync				! make sure transactions to
						! fp regs have completed
	add	%fp, STACK_BIAS - 65, %l1	! get stack frame for fp regs
	and	%l1, -VIS_BLOCKSIZE, %l1	! block align frame
	stda	%d0, [%l1]ASI_BLK_P		! %l1 = addr of saved fp regs

	! enable fp
4:	membar	#StoreStore|#StoreLoad|#LoadStore
	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_P, %asi

	! load up FP registers with invalid TSB tag.
	fone	%d0			! ones in tag
	fzero	%d2			! zeros in TTE
	fone	%d4			! ones in tag
	fzero	%d6			! zeros in TTE
	fone	%d8			! ones in tag
	fzero	%d10			! zeros in TTE
	fone	%d12			! ones in tag
	fzero	%d14			! zeros in TTE
	ba,pt	%xcc, .sfmmu_inv_doblock
	  mov	(4*VIS_BLOCKSIZE), %i4	! we do 4 stda's each loop below

.sfmmu_inv_blkstart:
      ! stda	%d0, [%i0+192]%asi  ! in dly slot of branch that got us here
	stda	%d0, [%i0+128]%asi
	stda	%d0, [%i0+64]%asi
	stda	%d0, [%i0]%asi

	add	%i0, %i4, %i0
	sub	%i1, %i4, %i1

.sfmmu_inv_doblock:
	cmp	%i1, (4*VIS_BLOCKSIZE)	! check for completion
	bgeu,a	%icc, .sfmmu_inv_blkstart
	  stda	%d0, [%i0+192]%asi

.sfmmu_inv_finish:
	membar	#Sync
	btst	FPRS_FEF, %l0		! saved from above
	bz,a	.sfmmu_inv_finished
	  wr	%l0, 0, %fprs		! restore fprs

	! restore fpregs from stack
	ldda    [%l1]ASI_BLK_P, %d0
	membar	#Sync
	wr	%l0, 0, %fprs		! restore fprs

.sfmmu_inv_finished:
	! kpreempt_enable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	dec	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]
	ret
	  restore
	SET_SIZE(sfmmu_inv_tsb_fast)
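
	/*
	 * Logically, the block-store loop above amounts to the following
	 * C (illustrative only; the real code uses 64-byte VIS block
	 * stores for speed, which is why the fpregs are saved and
	 * restored around it):
	 *
	 *	uint64_t *p = (uint64_t *)tsb_base;
	 *	uint64_t *end = (uint64_t *)(tsb_base + tsb_bytes);
	 *	while (p < end) {
	 *		*p++ = (uint64_t)-1;	// tag word: all ones => invalid
	 *		*p++ = 0;		// TTE word: cleared
	 *	}
	 */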

#endif /* lint */

#if defined(lint)

/*
 * Prefetch "struct tsbe" while walking TSBs.
 * Prefetch 7 cache lines ahead of where we are now.
 * #n_reads is being used since #one_read only applies to
 * floating point reads, and we are not doing floating point
 * reads.  However, this has the negative side effect of polluting
 * the ecache.
 * The 448 comes from (7 * 64), which is how far ahead of our current
 * address we want to prefetch.
 */
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}

/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}

#else /* lint */

	ENTRY(prefetch_tsbe_read)
	retl
	  prefetch	[%o0+448], #n_reads
	SET_SIZE(prefetch_tsbe_read)

	ENTRY(prefetch_tsbe_write)
	retl
	  prefetch	[%o0], #n_writes
	SET_SIZE(prefetch_tsbe_write)
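
	/*
	 * The 448 byte offset above is 7 cache lines (7 * 64) ahead of the
	 * tsbe being read.  As a rough GCC-style C analogy (illustrative
	 * only, not how these routines are actually built):
	 *
	 *	void
	 *	prefetch_tsbe_read(struct tsbe *tsbep)
	 *	{
	 *		// second arg 0 = prefetch for read, third = locality
	 *		__builtin_prefetch((char *)tsbep + 7 * 64, 0, 3);
	 *	}
	 *
	 *	void
	 *	prefetch_tsbe_write(struct tsbe *tsbep)
	 *	{
	 *		__builtin_prefetch(tsbep, 1, 3);	// 1 = for write
	 *	}
	 */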
#endif /* lint */


#ifndef lint
#endif	/* lint */
659