/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

/*
 * sfmmu related subroutines
 */

#if defined (lint)

/*
 * sfmmu related subroutines
 */
/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
{}

/* ARGSUSED */
void
sfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
{}

/* ARGSUSED */
void
sfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
{}

int
sfmmu_getctx_pri()
{ return(0); }

int
sfmmu_getctx_sec()
{ return(0); }

/* ARGSUSED */
void
sfmmu_setctx_sec(uint_t ctx)
{}

/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{
}

#else	/* lint */


/*
 * Invalidate either the context of a specific victim or any process
 * currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being invalidated
 *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
 * Note %g1 is the only input argument used by this xcall handler.
 */
	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT) {
	!	if (sec-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to sec-ctx
	!	}
	!	if (pri-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to pri-ctx
	!	}
	! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
	!	return
	! } else {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	! }
	!

	sethi   %hi(ksfmmup), %g3
	ldx     [%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn %xcc, ptl1_panic	/* can't invalidate kernel ctx */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2

	cmp	%g1, INVALID_CONTEXT
	bne,pt	%xcc, 1f			/* called from wrap_around? */
	  mov	MMU_SCONTEXT, %g3

	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = pgsz | sec-ctx */
	set     CTXREG_CTX_MASK, %g4
	and	%g5, %g4, %g5			/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 0f			/* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g3		/* get pgsz | pri-ctx */
	and     %g3, %g4, %g5			/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 2f			/* yes, no need to change */
	  srlx	%g3, CTXREG_NEXT_SHIFT, %g3	/* %g3 = nucleus pgsz */
	sllx	%g3, CTXREG_NEXT_SHIFT, %g3	/* need to preserve nucleus pgsz */
	or	%g3, %g2, %g2			/* %g2 = nucleus pgsz | INVALID_CONTEXT */

	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	retry

1:
	/* %g3 = MMU_SCONTEXT	*/
	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5     /* load usfmmup */

	cmp	%g5, %g1			/* is the victim hat running here? */
	bne,pt	%xcc, 2f
	  nop

	ldxa    [%g3]ASI_MMU_CTX, %g5           /* %g5 = pgsz | sec-ctx */
	set     CTXREG_CTX_MASK, %g4
	and     %g5, %g4, %g5			/* %g5 = sec-ctx */
	cmp     %g5, INVALID_CONTEXT            /* kernel ctx or invalid ctx? */
	ble,pn  %xcc, 0f                        /* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g3		/* %g3 = pgsz | pri-ctx */
	set     CTXREG_CTX_MASK, %g6
	and	%g3, %g6, %g4			/* %g4 = pri-ctx */
	cmp	%g4, INVALID_CONTEXT		/* is pri-ctx the victim? */
	ble 	%icc, 2f			/* no, no need to change it */
	  srlx	%g3, CTXREG_NEXT_SHIFT, %g3	/* %g3 = nucleus pgsz */
	sllx	%g3, CTXREG_NEXT_SHIFT, %g3	/* need to preserve nucleus pgsz */
	or	%g3, %g2, %g2			/* %g2 = nucleus pgsz | INVALID_CONTEXT */
	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	/* next instruction is retry so no membar sync */
2:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)

	/*
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
	ENTRY_NP(sfmmu_itlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l1, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! Clear page offset

	ldx	[%o1], %g1
	set	MMU_TAG_ACCESS, %o5
#ifdef	CHEETAHPLUS_ERRATUM_34
	!
	! If this is Cheetah or derivative and the specified TTE is locked
	! and hence to be loaded into the T16, fully-associative TLB, we
	! must avoid Cheetah+ erratum 34.  In Cheetah+ erratum 34, under
	! certain conditions an ITLB locked index 0 TTE will erroneously be
	! displaced when a new TTE is loaded via ASI_ITLB_IN.  To avoid
	! this erratum, we scan the T16 top down for an unlocked TTE and
	! explicitly load the specified TTE into that index.
	!
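	!
	! For a locked TTE on Cheetah-class CPUs the workaround below amounts
	! to roughly the following (illustrative sketch only, not generated
	! code; TTE_IS_VALID/TTE_IS_LOCKED stand in for the inline bit tests,
	! and the scan covers T16 indices 15 down to 1):
	!
	!	for (idx = 15; idx >= 1; idx--) {
	!		if (!TTE_IS_VALID(t16[idx]) || !TTE_IS_LOCKED(t16[idx])) {
	!			t16[idx] = new_tte;	/* via ASI_ITLB_ACCESS */
	!			goto done;
	!		}
	!	}
	!	panic(sfmmu_panic5);
	!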
	GET_CPU_IMPL(%g2)
	cmp	%g2, CHEETAH_IMPL
	bl,pn	%icc, 0f
	  nop

	andcc	%g1, TTE_LCK_INT, %g0
	bz	%icc, 0f			! Lock bit is not set;
						!   load normally.
	  or	%g0, (15 << 3), %g3		! Start searching from the
						!   top down.

1:
	ldxa	[%g3]ASI_ITLB_ACCESS, %g4	! Load TTE from t16

	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	cmp	%g4, %g0
	bge	%xcc, 2f			! TTE is >= 0 iff not valid
	  andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz	%icc, 2f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3
	brgz	%g3, 1b				! Still more TLB entries
	  nop					! to search

	sethi   %hi(sfmmu_panic5), %o0          ! We searched all entries and
	call    panic                           ! found no unlocked TTE so
	  or    %o0, %lo(sfmmu_panic5), %o0     ! give up.


2:
	!
	! We have found an unlocked or non-valid entry; we'll explicitly load
	! our locked entry here.
	!
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g3]ASI_ITLB_ACCESS
	flush	%o1				! Flush required for I-MMU
	ba	3f				! Delay slot of ba is empty
	  nop					!   per Erratum 64

0:
#endif	/* CHEETAHPLUS_ERRATUM_34 */
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g0]ASI_ITLB_IN
	flush	%o1				! Flush required for I-MMU
3:
	retl
	  wrpr	%g0, %o3, %pstate		! Enable interrupts
	SET_SIZE(sfmmu_itlb_ld_kva)

	/*
	 * Load an entry into the DTLB.
	 *
	 * Special handling is required for locked entries since there
	 * are some TLB slots that are reserved for the kernel but not
	 * always held locked.  We want to avoid loading locked TTEs
	 * into those slots since they could be displaced.
	 *
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
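	!
	! For a locked TTE the code below avoids the T16 slots reserved for
	! the kernel (index dtlb_resv_ttenum and above) and searches downward
	! from just below them for a slot holding an invalid or unlocked
	! entry.  Roughly (illustrative sketch only, not generated code;
	! TTE_IS_VALID/TTE_IS_LOCKED stand in for the inline bit tests):
	!
	!	if (!TTE_IS_LOCKED(new_tte)) {
	!		load via ASI_DTLB_IN;		/* any slot */
	!	} else {
	!		for (idx = dtlb_resv_ttenum - 1; idx >= 0; idx--)
	!			if (!TTE_IS_VALID(tlb0[idx]) ||
	!			    !TTE_IS_LOCKED(tlb0[idx]))
	!				break;
	!		if (idx < 0)
	!			panic(sfmmu_panic5);
	!		tlb0[idx] = new_tte;		/* via ASI_DTLB_ACCESS */
	!	}
	!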
	ENTRY_NP(sfmmu_dtlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l2, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! clear page offset

	ldx	[%o1], %g1

	set	MMU_TAG_ACCESS, %o5

	set	cpu_impl_dual_pgsz, %o2
	ld	[%o2], %o2
	brz	%o2, 1f
	  nop

	sethi	%hi(ksfmmup), %o2
	ldx	[%o2 + %lo(ksfmmup)], %o2
	ldub    [%o2 + SFMMU_CEXT], %o2
	sll     %o2, TAGACCEXT_SHIFT, %o2

	set	MMU_TAG_ACCESS_EXT, %o4		! can go into T8 if unlocked
	stxa	%o2,[%o4]ASI_DMMU
	membar	#Sync
1:
	andcc	%g1, TTE_LCK_INT, %g0		! Locked entries require
	bnz,pn	%icc, 2f			! special handling
	  sethi	%hi(dtlb_resv_ttenum), %g3
	stxa	%o0,[%o5]ASI_DMMU		! Load unlocked TTE
	stxa	%g1,[%g0]ASI_DTLB_IN		! via DTLB_IN
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
2:
	ld	[%g3 + %lo(dtlb_resv_ttenum)], %g3
	sll	%g3, 3, %g3			! First reserved idx in TLB 0
	sub	%g3, (1 << 3), %g3		! Decrement idx
3:
	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	brgez,pn %g4, 4f			! TTE is >= 0 iff not valid
	  nop
	andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz,pn	%icc, 4f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3		! Decrement idx
	brgez	%g3, 3b
	  nop
	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
	call	panic				! found no unlocked TTE so
	  or	%o0, %lo(sfmmu_panic5), %o0	! give up.
4:
	stxa	%o0,[%o5]ASI_DMMU		! Setup tag access
#ifdef	OLYMPUS_SHARED_FTLB
	stxa	%g1,[%g0]ASI_DTLB_IN
#else
	stxa	%g1,[%g3]ASI_DTLB_ACCESS	! Displace entry at idx
#endif
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
	SET_SIZE(sfmmu_dtlb_ld_kva)

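	/*
	 * Return the current primary context register value.  The raw
	 * register contents are returned, so on processors with page size
	 * fields in the context register those bits are included.
	 */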
	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	  ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

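	/*
	 * Return the current secondary context number; the page size bits
	 * of the context register are masked off.
	 */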
	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	set	CTXREG_CTX_MASK, %o1
	ldxa	[%o0]ASI_MMU_CTX, %o0
	retl
	  and	%o0, %o1, %o0
	SET_SIZE(sfmmu_getctx_sec)

	/*
	 * Set the secondary context register for this process.
	 * %o0 = page_size | context number for this process.
	 */
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first.
	 */
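	!
	! Rough outline (illustrative only, not generated code):
	!	ie = (%pstate & PSTATE_IE);
	!	if (ie) disable interrupts;
	!	secondary context register = %o0; flush;
	!	if (ie) re-enable interrupts;
	!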

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	  wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */

1:
	mov	MMU_SCONTEXT, %o1

	sethi	%hi(FLUSH_ADDR), %o4
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	flush	%o4

	/*
	 * If the routine was entered with interrupts enabled, then enable
	 * interrupts now.  Otherwise, keep interrupts disabled and return
	 * without enabling them.
	 * %g1 - old intr state
	 */
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 2f
	  wrpr	%g0, %g1, %pstate		/* enable interrupts */
2:	retl
	  nop
	SET_SIZE(sfmmu_setctx_sec)

	/*
	 * Set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
	 * Returns the detection value in %o0.
	 *
	 * Currently ASI_QUAD_LDD_PHYS is supported in processors as follows
	 *  - cheetah+ and later (greater or equal to CHEETAH_PLUS_IMPL)
	 *  - FJ OPL Olympus-C and later (less than SPITFIRE_IMPL)
	 *
	 */
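	!
	! The detection below is equivalent to roughly the following
	! (illustrative sketch only, not generated code):
	!
	!	impl = cpu implementation number;
	!	if (impl >= CHEETAH_PLUS_IMPL || impl < SPITFIRE_IMPL) {
	!		ktsb_phys = 1;
	!		return (1);
	!	}
	!	return (0);		! ktsb_phys is left untouched
	!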
	ENTRY_NP(sfmmu_setup_4lp)
	GET_CPU_IMPL(%o0);
	cmp	%o0, CHEETAH_PLUS_IMPL
	bge,pt	%icc, 4f
	  mov	1, %o1
	cmp	%o0, SPITFIRE_IMPL
	bge,a,pn %icc, 3f
	  clr	%o1
4:
	set	ktsb_phys, %o2
	st	%o1, [%o2]
3:	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)


	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 */
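	!
	! Rough outline (illustrative only, not generated code):
	!	if (sfmmup == ksfmmup)
	!		return;			! kernel hat: nothing to do
	!	program the TSB base register(s) from the hat's tsbinfo list
	!	    (and, when UTSB_PHYS is not defined, load locked TTEs
	!	    mapping the TSB virtual addresses);
	!	copy the ISM block PA, the hat pointer and the TTE flags into
	!	    this CPU's tsbmiss area for use by the TSB miss handlers.
	!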
	ENTRY_NP(sfmmu_load_mmustate)

#ifdef DEBUG
	PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l3, %g1)
#endif /* DEBUG */

	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o0
	be,pn	%xcc, 3f			! if kernel as, do nothing
	  nop

	/*
	 * We need to set up the TSB base register, tsbmiss
	 * area, and load locked TTE(s) for the TSB.
	 */
	ldx	[%o0 + SFMMU_TSB], %o1		! %o1 = first tsbinfo
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second tsbinfo

#ifdef UTSB_PHYS
	/*
	 * UTSB_PHYS accesses user TSBs via physical addresses.  The first
	 * TSB is in the MMU I/D TSB Base registers.  The second TSB uses a
	 * designated ASI_SCRATCHPAD register as a pseudo TSB base register.
	 */
	MAKE_UTSBREG_PHYS(%o1, %o2, %o3)	! %o2 = first utsbreg
	LOAD_TSBREG(%o2, %o3, %o4)		! write TSB base register

	brz,a,pt %g2, 2f
	  mov   -1, %o2				! use -1 if no second TSB

	MAKE_UTSBREG_PHYS(%g2, %o2, %o3)	! %o2 = second utsbreg
2:
	LOAD_2ND_TSBREG(%o2, %o3)		! write 2nd pseudo TSB base register
#else /* UTSB_PHYS */
	brz,pt  %g2, 4f
	  nop
	/*
	 * We have a second TSB for this process, so we need to
	 * encode data for both the first and second TSB in our single
	 * TSB base register.  See hat_sfmmu.h for details on what bits
	 * correspond to which TSB.
	 * We also need to load a locked TTE into the TLB for the second TSB
	 * in this case.
	 */
	MAKE_TSBREG_SECTSB(%o2, %o1, %g2, %o3, %o4, %g3, sfmmu_tsb_2nd)
	! %o2 = tsbreg
	sethi	%hi(utsb4m_dtlb_ttenum), %o3
	sethi	%hi(utsb4m_vabase), %o4
	ld	[%o3 + %lo(utsb4m_dtlb_ttenum)], %o3
	ldx	[%o4 + %lo(utsb4m_vabase)], %o4	! %o4 = TLB tag for sec TSB
	sll	%o3, DTACC_SHIFT, %o3		! %o3 = sec TSB TLB index
	RESV_OFFSET(%g2, %o4, %g3, sfmmu_tsb_2nd)	! or-in bits of TSB VA
	LOAD_TSBTTE(%g2, %o3, %o4, %g3)		! load sec TSB locked TTE
	sethi	%hi(utsb_vabase), %g3
	ldx	[%g3 + %lo(utsb_vabase)], %g3	! %g3 = TLB tag for first TSB
	ba,pt	%xcc, 5f
	  nop

4:	sethi	%hi(utsb_vabase), %g3
	ldx	[%g3 + %lo(utsb_vabase)], %g3	! %g3 = TLB tag for first TSB
	MAKE_TSBREG(%o2, %o1, %g3, %o3, %o4, sfmmu_tsb_1st)	! %o2 = tsbreg

5:	LOAD_TSBREG(%o2, %o3, %o4)		! write TSB base register

	/*
	 * Load the TTE for the first TSB at the appropriate location in
	 * the TLB
	 */
	sethi	%hi(utsb_dtlb_ttenum), %o2
	ld	[%o2 + %lo(utsb_dtlb_ttenum)], %o2
	sll	%o2, DTACC_SHIFT, %o2		! %o2 = first TSB TLB index
	RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st)	! or-in bits of TSB VA
	LOAD_TSBTTE(%o1, %o2, %g3, %o4)		! load first TSB locked TTE
#endif /* UTSB_PHYS */

6:	ldx	[%o0 + SFMMU_ISMBLKPA], %o1	! copy members of sfmmu
	CPU_TSBMISS_AREA(%o2, %o3)		! we need to access from
	stx	%o1, [%o2 + TSBMISS_ISMBLKPA]	! sfmmu_tsb_miss into the
	ldub	[%o0 + SFMMU_TTEFLAGS], %o3	! per-CPU tsbmiss area.
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stub	%o3, [%o2 + TSBMISS_UTTEFLAGS]

3:	retl
	  nop
	SET_SIZE(sfmmu_load_mmustate)

#endif /* lint */

#if defined (lint)
/*
 * Invalidate all of the entries within the tsb, by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact TSBs are page aligned and a multiple of
 * PAGESIZE to use block stores.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */
/* ARGSUSED */
void
sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
{}

#else /* lint */

#define	VIS_BLOCKSIZE	64

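	!
	! Rough outline (illustrative only, not generated code):
	!	kpreempt_disable();
	!	if the FPU is live, block-save %d0-%d14 to an aligned
	!	    buffer in this frame;
	!	fill %d0-%d14 with the invalid-tag pattern (tag words all
	!	    ones, TTE words zero) and block-store it over the TSB,
	!	    four 64-byte stda's per loop iteration;
	!	restore the FP state and kpreempt_enable();
	!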
	ENTRY(sfmmu_inv_tsb_fast)

	! Get space for aligned block of saved fp regs.
	save	%sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp

	! kpreempt_disable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	inc	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]

	! See if fpu was in use.  If it was, we need to save off the
	! floating point registers to the stack.
	rd	%fprs, %l0			! %l0 = cached copy of fprs
	btst	FPRS_FEF, %l0
	bz,pt	%icc, 4f
	  nop

	! save in-use fpregs on stack
	membar	#Sync				! make sure transfers to fp
						! regs have completed
	add	%fp, STACK_BIAS - 65, %l1	! get stack frame for fp regs
	and	%l1, -VIS_BLOCKSIZE, %l1	! block align frame
	stda	%d0, [%l1]ASI_BLK_P		! %l1 = addr of saved fp regs

	! enable fp
4:	membar	#StoreStore|#StoreLoad|#LoadStore
	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_P, %asi

	! load up FP registers with invalid TSB tag.
	fone	%d0			! ones in tag
	fzero	%d2			! zeros in TTE
	fone	%d4			! ones in tag
	fzero	%d6			! zeros in TTE
	fone	%d8			! ones in tag
	fzero	%d10			! zeros in TTE
	fone	%d12			! ones in tag
	fzero	%d14			! zeros in TTE
	ba,pt	%xcc, .sfmmu_inv_doblock
	  mov	(4*VIS_BLOCKSIZE), %i4	! we do 4 stda's each loop below

.sfmmu_inv_blkstart:
      ! stda	%d0, [%i0+192]%asi  ! in dly slot of branch that got us here
	stda	%d0, [%i0+128]%asi
	stda	%d0, [%i0+64]%asi
	stda	%d0, [%i0]%asi

	add	%i0, %i4, %i0
	sub	%i1, %i4, %i1

.sfmmu_inv_doblock:
	cmp	%i1, (4*VIS_BLOCKSIZE)	! check for completion
	bgeu,a	%icc, .sfmmu_inv_blkstart
	  stda	%d0, [%i0+192]%asi

.sfmmu_inv_finish:
	membar	#Sync
	btst	FPRS_FEF, %l0		! saved from above
	bz,a	.sfmmu_inv_finished
	  wr	%l0, 0, %fprs		! restore fprs

	! restore fpregs from stack
	ldda    [%l1]ASI_BLK_P, %d0
	membar	#Sync
	wr	%l0, 0, %fprs		! restore fprs

.sfmmu_inv_finished:
	! kpreempt_enable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	dec	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]
	ret
	  restore
	SET_SIZE(sfmmu_inv_tsb_fast)

#endif /* lint */

#if defined(lint)

/*
 * Prefetch "struct tsbe" while walking TSBs.
 * Prefetch 7 cache lines ahead of where we are now.
 * #n_reads is used since #one_read only applies to floating
 * point reads, and we are not doing floating point reads.
 * However, this has the negative side effect of polluting
 * the ecache.
 * The 448 comes from (7 * 64), which is how far ahead of our
 * current address we want to prefetch.
 */
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}

/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}

#else /* lint */

	ENTRY(prefetch_tsbe_read)
	retl
	  prefetch	[%o0+448], #n_reads
	SET_SIZE(prefetch_tsbe_read)

	ENTRY(prefetch_tsbe_write)
	retl
	  prefetch	[%o0], #n_writes
	SET_SIZE(prefetch_tsbe_write)
#endif /* lint */


#ifndef lint
#endif	/* lint */