/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

/*
 * sfmmu related subroutines
 */

#if defined (lint)

/*
 * sfmmu related subroutines
 */
/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
{}

/* ARGSUSED */
void
sfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
{}

/* ARGSUSED */
void
sfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
{}

int
sfmmu_getctx_pri()
{ return(0); }

int
sfmmu_getctx_sec()
{ return(0); }

/* ARGSUSED */
void
sfmmu_setctx_sec(uint_t ctx)
{}

/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{
}

#else	/* lint */

/*
 * Invalidate either the context of a specific victim or any process
 * currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being invalidated
 *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
 * Note %g1 is the only input argument used by this xcall handler.
 */
	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT ||
	!     current CPU tsbmiss->usfmmup == victim sfmmup) {
	!       if (shctx_on) {
	!               shctx = INVALID;
	!       }
	!	if (sec-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to sec-ctx
	!	}
	!	if (pri-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to pri-ctx
	!	}
	! }

	sethi   %hi(ksfmmup), %g3
        ldx     [%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn %xcc, ptl1_panic		/* can't invalidate kernel ctx */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2
	cmp	%g1, INVALID_CONTEXT
	be,pn	%xcc, 0f			/* called from wrap_around? */
	  mov	MMU_SCONTEXT, %g3

	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5     /* load usfmmup */
	cmp	%g5, %g1			/* is the victim hat running here? */
	bne,pt	%xcc, 3f
	  nop

0:
	sethi   %hi(shctx_on), %g5
        ld      [%g5 + %lo(shctx_on)], %g5
        brz     %g5, 1f
          mov     MMU_SHARED_CONTEXT, %g5
        sethi   %hi(FLUSH_ADDR), %g4
        stxa    %g0, [%g5]ASI_MMU_CTX
        flush   %g4

1:
	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = pgsz | sec-ctx */
	set     CTXREG_CTX_MASK, %g4
	and	%g5, %g4, %g5			/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 2f			/* yes, no need to change */
	  mov   MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
	membar	#Sync

2:
	ldxa	[%g7]ASI_MMU_CTX, %g3		/* get pgsz | pri-ctx */
	and     %g3, %g4, %g5			/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 3f			/* yes, no need to change */
	  srlx	%g3, CTXREG_NEXT_SHIFT, %g3	/* %g3 = nucleus pgsz */
	sllx	%g3, CTXREG_NEXT_SHIFT, %g3	/* need to preserve nucleus pgsz */
	or	%g3, %g2, %g2			/* %g2 = nucleus pgsz | INVALID_CONTEXT */

	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
3:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)



	/*
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
	ENTRY_NP(sfmmu_itlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l1, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! Clear page offset

	ldx	[%o1], %g1
	set	MMU_TAG_ACCESS, %o5
#ifdef	CHEETAHPLUS_ERRATUM_34
	!
	! If this is Cheetah or derivative and the specified TTE is locked
	! and hence to be loaded into the T16, fully-associative TLB, we
	! must avoid Cheetah+ erratum 34.  In Cheetah+ erratum 34, under
	! certain conditions an ITLB locked index 0 TTE will erroneously be
	! displaced when a new TTE is loaded via ASI_ITLB_IN.  To avoid
	! this erratum, we scan the T16 top down for an unlocked TTE and
	! explicitly load the specified TTE into that index.
	!
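	!
	! In rough C-style pseudocode, the scan below amounts to (a sketch
	! only; tlb_read/tlb_write stand in for the ASI_ITLB_ACCESS loads
	! and stores used below):
	!
	!	for (i = 15; i > 0; i--) {
	!		tte = tlb_read(i);
	!		if (tte is invalid or unlocked) {
	!			tlb_write(i, new_tte);
	!			return;
	!		}
	!	}
	!	panic(sfmmu_panic5);	no unlocked entry was found
	!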
	GET_CPU_IMPL(%g2)
	cmp	%g2, CHEETAH_IMPL
	bl,pn	%icc, 0f
	  nop

	andcc	%g1, TTE_LCK_INT, %g0
	bz	%icc, 0f			! Lock bit is not set;
						!   load normally.
	  or	%g0, (15 << 3), %g3		! Start searching from the
						!   top down.

1:
	ldxa	[%g3]ASI_ITLB_ACCESS, %g4	! Load TTE from t16

	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	cmp	%g4, %g0
	bge	%xcc, 2f			! TTE is >= 0 iff not valid
	  andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz	%icc, 2f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3
	brgz	%g3, 1b				! Still more TLB entries
	  nop					! to search

	sethi   %hi(sfmmu_panic5), %o0          ! We searched all entries and
	call    panic                           ! found no unlocked TTE so
	  or    %o0, %lo(sfmmu_panic5), %o0     ! give up.


2:
	!
	! We have found an unlocked or non-valid entry; we'll explicitly load
	! our locked entry here.
	!
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g3]ASI_ITLB_ACCESS
	flush	%o1				! Flush required for I-MMU
	ba	3f				! Delay slot of ba is empty
	  nop					!   per Erratum 64

0:
#endif	/* CHEETAHPLUS_ERRATUM_34 */
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g0]ASI_ITLB_IN
	flush	%o1				! Flush required for I-MMU
3:
	retl
	  wrpr	%g0, %o3, %pstate		! Enable interrupts
	SET_SIZE(sfmmu_itlb_ld_kva)

	/*
	 * Load an entry into the DTLB.
	 *
	 * Special handling is required for locked entries since there
	 * are some TLB slots that are reserved for the kernel but not
	 * always held locked.  We want to avoid loading locked TTEs
	 * into those slots since they could be displaced.
	 *
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
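	/*
	 * In outline (a sketch of the logic below, not a separate
	 * implementation):
	 *
	 *	if the TTE is not locked (TTE_LCK_INT clear):
	 *		load it via ASI_DTLB_IN (any slot may be used);
	 *	else:
	 *		scan indices dtlb_resv_ttenum - 1 down to 1 (down to
	 *		0 on pre-Cheetah parts) for an invalid or unlocked
	 *		entry and displace it via ASI_DTLB_ACCESS (or
	 *		ASI_DTLB_IN on OLYMPUS_SHARED_FTLB builds);
	 *		panic if every candidate slot holds a locked TTE.
	 */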
	ENTRY_NP(sfmmu_dtlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l2, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! clear page offset

	ldx	[%o1], %g1

	set	MMU_TAG_ACCESS, %o5

	set	cpu_impl_dual_pgsz, %o2
	ld	[%o2], %o2
	brz	%o2, 1f
	  nop

	sethi	%hi(ksfmmup), %o2
	ldx	[%o2 + %lo(ksfmmup)], %o2
	ldub    [%o2 + SFMMU_CEXT], %o2
        sll     %o2, TAGACCEXT_SHIFT, %o2

	set	MMU_TAG_ACCESS_EXT, %o4		! can go into T8 if unlocked
	stxa	%o2,[%o4]ASI_DMMU
	membar	#Sync
1:
	andcc	%g1, TTE_LCK_INT, %g0		! Locked entries require
	bnz,pn	%icc, 2f			! special handling
	  sethi	%hi(dtlb_resv_ttenum), %g3
	stxa	%o0,[%o5]ASI_DMMU		! Load unlocked TTE
	stxa	%g1,[%g0]ASI_DTLB_IN		! via DTLB_IN
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
2:
#ifdef	CHEETAHPLUS_ERRATUM_34
	GET_CPU_IMPL(%g2)
#endif
	ld	[%g3 + %lo(dtlb_resv_ttenum)], %g3
	sll	%g3, 3, %g3			! First reserved idx in TLB 0
	sub	%g3, (1 << 3), %g3		! Decrement idx
	! Erratum 15 workaround due to ld [%g3 + %lo(dtlb_resv_ttenum)], %g3
	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
3:
	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	brgez,pn %g4, 4f			! TTE is >= 0 iff not valid
	  nop
	andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz,pn	%icc, 4f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3		! Decrement idx
#ifdef	CHEETAHPLUS_ERRATUM_34
	!
	! If this is a Cheetah or derivative, we must work around Erratum 34
	! for the DTLB.  Erratum 34 states that under certain conditions,
	! a locked entry 0 TTE may be improperly displaced.  To avoid this,
	! we do not place a locked TTE in entry 0.
	!
	brgz	%g3, 3b
	  nop
	cmp	%g2, CHEETAH_IMPL
	bge,pt	%icc, 5f
	  nop
	brz	%g3, 3b
	 nop
#else	/* CHEETAHPLUS_ERRATUM_34 */
	brgez	%g3, 3b
	  nop
#endif	/* CHEETAHPLUS_ERRATUM_34 */
5:
	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
	call	panic				! found no unlocked TTE so
	  or	%o0, %lo(sfmmu_panic5), %o0	! give up.
4:
	stxa	%o0,[%o5]ASI_DMMU		! Setup tag access
#ifdef	OLYMPUS_SHARED_FTLB
	stxa	%g1,[%g0]ASI_DTLB_IN
#else
	stxa	%g1,[%g3]ASI_DTLB_ACCESS	! Displace entry at idx
#endif
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
	SET_SIZE(sfmmu_dtlb_ld_kva)

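	/*
	 * Return the current primary context register value.  Note that the
	 * page size fields are not masked off here.
	 */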
	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	  ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

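	/*
	 * Return the current secondary context number, with the page size
	 * fields masked off via CTXREG_CTX_MASK.
	 */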
	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	set	CTXREG_CTX_MASK, %o1
	ldxa	[%o0]ASI_MMU_CTX, %o0
	retl
	  and	%o0, %o1, %o0
	SET_SIZE(sfmmu_getctx_sec)

	/*
	 * Set the secondary context register for this process.
	 * %o0 = page_size | context number for this process.
	 */
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first.
	 */
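	/*
	 * As a rough C sketch of the pattern below (get_pstate/set_pstate
	 * are illustrative names only, not real kernel interfaces):
	 *
	 *	saved = get_pstate();
	 *	if (saved & PSTATE_IE)
	 *		set_pstate(saved & ~PSTATE_IE);	disable interrupts
	 *	write MMU_SCONTEXT (and, if shctx_on and the new context is
	 *	    INVALID_CONTEXT, clear MMU_SHARED_CONTEXT);
	 *	if (saved & PSTATE_IE)
	 *		set_pstate(saved);		restore interrupts
	 */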

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	  wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */

1:
	mov	MMU_SCONTEXT, %o1

	sethi	%hi(FLUSH_ADDR), %o4
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	flush	%o4
        sethi   %hi(shctx_on), %g3
        ld      [%g3 + %lo(shctx_on)], %g3
	brz     %g3, 2f
	  nop
	set	CTXREG_CTX_MASK, %o4
	and	%o0,%o4,%o1
	cmp	%o1, INVALID_CONTEXT
	bne,pn %icc, 2f
	  mov     MMU_SHARED_CONTEXT, %o1
        sethi   %hi(FLUSH_ADDR), %o4
        stxa    %g0, [%o1]ASI_MMU_CTX           /* clear shared context reg. */
        flush   %o4

	/*
	 * If the routine was entered with interrupts enabled, then enable
	 * them now; otherwise, keep them disabled and return without
	 * enabling interrupts.
	 * %g1 - old intr state
	 */
2:	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 3f
	  wrpr	%g0, %g1, %pstate		/* enable interrupts */
3:	retl
	  nop
	SET_SIZE(sfmmu_setctx_sec)

	/*
	 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
	 * returns the detection value in %o0.
	 *
	 * Currently ASI_QUAD_LDD_PHYS is supported in processors as follows
	 *  - cheetah+ and later (greater or equal to CHEETAH_PLUS_IMPL)
	 *  - FJ OPL Olympus-C and later  (less than SPITFIRE_IMPL)
	 *
	 */
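	/*
	 * Equivalently, as a C sketch (impl is the value GET_CPU_IMPL
	 * returns):
	 *
	 *	if (impl >= CHEETAH_PLUS_IMPL || impl < SPITFIRE_IMPL) {
	 *		ktsb_phys = 1;
	 *		return (1);
	 *	}
	 *	return (0);	ktsb_phys is left untouched
	 */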
	ENTRY_NP(sfmmu_setup_4lp)
	GET_CPU_IMPL(%o0);
	cmp	%o0, CHEETAH_PLUS_IMPL
	bge,pt	%icc, 4f
	  mov	1, %o1
	cmp	%o0, SPITFIRE_IMPL
	bge,a,pn %icc, 3f
	  clr	%o1
4:
	set	ktsb_phys, %o2
	st	%o1, [%o2]
3:	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)


	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 *
	 */
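	/*
	 * In outline (a sketch of what follows, not a separate
	 * implementation):
	 *
	 *	if (sfmmup == ksfmmup)
	 *		return;		nothing to do for the kernel hat
	 *	program the TSB base register(s) from sfmmup's tsbinfo list
	 *	    (and, under UTSB_PHYS, the scratchpad pseudo TSB base
	 *	    registers);
	 *	copy the fields the TL>0 miss handlers need (ISM block pa,
	 *	    tte flags, uhatid and, under UTSB_PHYS, the region maps)
	 *	    into this CPU's tsbmiss area.
	 */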
	ENTRY_NP(sfmmu_load_mmustate)

#ifdef DEBUG
        PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l3, %g1)
#endif /* DEBUG */

        sethi   %hi(ksfmmup), %o3
        ldx     [%o3 + %lo(ksfmmup)], %o3
        cmp     %o3, %o0
        be,pn   %xcc, 8f			! if kernel as, do nothing
          nop
        /*
         * We need to set up the TSB base register, tsbmiss
         * area, and load locked TTE(s) for the TSB.
         */
        ldx     [%o0 + SFMMU_TSB], %o1          ! %o1 = first tsbinfo
        ldx     [%o1 + TSBINFO_NEXTPTR], %g2    ! %g2 = second tsbinfo

#ifdef UTSB_PHYS
        /*
         * UTSB_PHYS accesses user TSBs via physical addresses.  The first
         * TSB is in the MMU I/D TSB Base registers.  The 2nd, 3rd and
	 * 4th TSBs use designated ASI_SCRATCHPAD regs as pseudo TSB base regs.
	 */
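        /*
         * Mapping set up below (sketch):
         *      1st private TSB      -> MMU I/D TSB base register
         *      2nd private TSB      -> SCRATCHPAD_UTSBREG2 (-1 if absent)
         *      1st SCD (shared) TSB -> SCRATCHPAD_UTSBREG3 (-1 if no SCD)
         *      2nd SCD (shared) TSB -> SCRATCHPAD_UTSBREG4 (-1 if absent)
         */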

        /* create/set first UTSBREG actually loaded into MMU_TSB  */
        MAKE_UTSBREG(%o1, %o2, %o3)             ! %o2 = first utsbreg
	LOAD_TSBREG(%o2, %o3, %o4)              ! write TSB base register

        brz,a,pt  %g2, 2f
          mov   -1, %o2                         ! use -1 if no second TSB

        MAKE_UTSBREG(%g2, %o2, %o3)             ! %o2 = second utsbreg
2:
        SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)

	/* make 3rd and 4th TSB */
	CPU_TSBMISS_AREA(%o4, %o3) 		! %o4 = tsbmiss area

        ldx     [%o0 + SFMMU_SCDP], %g2         ! %g2 = sfmmu_scd
        brz,pt  %g2, 3f
          mov   -1, %o2                         ! use -1 if no third TSB

        ldx     [%g2 + SCD_SFMMUP], %g3         ! %g3 = scdp->scd_sfmmup
        ldx     [%g3 + SFMMU_TSB], %o1          ! %o1 = first scd tsbinfo
        brz,pn %o1, 5f
          nop                                   ! panic if no third TSB

	/* make 3rd UTSBREG */
        MAKE_UTSBREG(%o1, %o2, %o3)             ! %o2 = third utsbreg
3:
        SET_UTSBREG(SCRATCHPAD_UTSBREG3, %o2, %o3)
	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR]

        brz,pt  %g2, 4f
          mov   -1, %o2                         ! use -1 if no 3rd or 4th TSB

        ldx     [%o1 + TSBINFO_NEXTPTR], %g2    ! %g2 = second scd tsbinfo
        brz,pt  %g2, 4f
          mov   -1, %o2                         ! use -1 if no 4th TSB

	/* make 4th UTSBREG */
        MAKE_UTSBREG(%g2, %o2, %o3)             ! %o2 = fourth utsbreg
4:
        SET_UTSBREG(SCRATCHPAD_UTSBREG4, %o2, %o3)
	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR4M]
	ba,pt	%icc, 6f
	  mov	%o4, %o2			! %o2 = tsbmiss area
5:
        sethi   %hi(panicstr), %g1              ! panic if no 3rd TSB
        ldx     [%g1 + %lo(panicstr)], %g1
        tst     %g1

        bnz,pn  %xcc, 8f
          nop

        sethi   %hi(sfmmu_panic10), %o0
        call    panic
          or     %o0, %lo(sfmmu_panic10), %o0

#else /* UTSB_PHYS */

        brz,pt  %g2, 4f
          nop
        /*
         * We have a second TSB for this process, so we need to
         * encode data for both the first and second TSB in our single
         * TSB base register.  See hat_sfmmu.h for details on what bits
         * correspond to which TSB.
         * We also need to load a locked TTE into the TLB for the second TSB
         * in this case.
         */
        MAKE_TSBREG_SECTSB(%o2, %o1, %g2, %o3, %o4, %g3, sfmmu_tsb_2nd)
        ! %o2 = tsbreg
        sethi   %hi(utsb4m_dtlb_ttenum), %o3
        sethi   %hi(utsb4m_vabase), %o4
        ld      [%o3 + %lo(utsb4m_dtlb_ttenum)], %o3
        ldx     [%o4 + %lo(utsb4m_vabase)], %o4 ! %o4 = TLB tag for sec TSB
        sll     %o3, DTACC_SHIFT, %o3           ! %o3 = sec TSB TLB index
        RESV_OFFSET(%g2, %o4, %g3, sfmmu_tsb_2nd)       ! or-in bits of TSB VA
        LOAD_TSBTTE(%g2, %o3, %o4, %g3)         ! load sec TSB locked TTE
        sethi   %hi(utsb_vabase), %g3
        ldx     [%g3 + %lo(utsb_vabase)], %g3   ! %g3 = TLB tag for first TSB
        ba,pt   %xcc, 5f
          nop

4:      sethi   %hi(utsb_vabase), %g3
        ldx     [%g3 + %lo(utsb_vabase)], %g3   ! %g3 = TLB tag for first TSB
        MAKE_TSBREG(%o2, %o1, %g3, %o3, %o4, sfmmu_tsb_1st)     ! %o2 = tsbreg

5:      LOAD_TSBREG(%o2, %o3, %o4)              ! write TSB base register

        /*
         * Load the TTE for the first TSB at the appropriate location in
         * the TLB
         */
        sethi   %hi(utsb_dtlb_ttenum), %o2
        ld      [%o2 + %lo(utsb_dtlb_ttenum)], %o2
        sll     %o2, DTACC_SHIFT, %o2           ! %o2 = first TSB TLB index
        RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st)       ! or-in bits of TSB VA
        LOAD_TSBTTE(%o1, %o2, %g3, %o4)         ! load first TSB locked TTE
	CPU_TSBMISS_AREA(%o2, %o3)
#endif /* UTSB_PHYS */
6:
	ldx     [%o0 + SFMMU_ISMBLKPA], %o1     ! copy members of sfmmu
						! we need to access from
        stx     %o1, [%o2 + TSBMISS_ISMBLKPA]   ! sfmmu_tsb_miss into the
        ldub    [%o0 + SFMMU_TTEFLAGS], %o3     ! per-CPU tsbmiss area.
        stx     %o0, [%o2 + TSBMISS_UHATID]
        stub    %o3, [%o2 + TSBMISS_UTTEFLAGS]
#ifdef UTSB_PHYS
        ldx     [%o0 + SFMMU_SRDP], %o1
        ldub    [%o0 + SFMMU_RTTEFLAGS], %o4
        stub    %o4,  [%o2 + TSBMISS_URTTEFLAGS]
        stx     %o1, [%o2 +  TSBMISS_SHARED_UHATID]
        brz,pn  %o1, 8f				! check for sfmmu_srdp
          add   %o0, SFMMU_HMERMAP, %o1
        add     %o2, TSBMISS_SHMERMAP, %o2
        mov     SFMMU_HMERGNMAP_WORDS, %o3
                                                ! set tsbmiss shmermap
        SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)

	ldx     [%o0 + SFMMU_SCDP], %o4         ! %o4 = sfmmu_scd
        CPU_TSBMISS_AREA(%o2, %o3)              ! %o2 = tsbmiss area
        mov     SFMMU_HMERGNMAP_WORDS, %o3
        brnz,pt %o4, 7f                       ! check for sfmmu_scdp else
          add   %o2, TSBMISS_SCDSHMERMAP, %o2 ! zero tsbmiss scd_shmermap
        ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
	ba 8f
	  nop
7:
        add     %o4, SCD_HMERMAP, %o1
        SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
#endif /* UTSB_PHYS */

8:
	retl
          nop
        SET_SIZE(sfmmu_load_mmustate)

#endif /* lint */

#if defined (lint)
/*
 * Invalidate all of the entries within the tsb, by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact TSBs are page aligned and a multiple of
 * PAGESIZE to use block stores.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */
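/*
 * A plain C equivalent of the store pattern (a sketch only; the assembly
 * below uses VIS block stores of a preloaded ones/zeros pattern instead):
 *
 *	uint64_t *p = (uint64_t *)tsb_base;
 *	uint64_t *end = (uint64_t *)(tsb_base + tsb_bytes);
 *	while (p < end) {
 *		*p++ = (uint64_t)-1;	tag word: all ones, i.e. invalid
 *		*p++ = 0;		tte data: zero
 *	}
 */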
/* ARGSUSED */
void
sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
{}

#else /* lint */

#define	VIS_BLOCKSIZE	64

	ENTRY(sfmmu_inv_tsb_fast)

	! Get space for aligned block of saved fp regs.
	save	%sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp

	! kpreempt_disable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	inc	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]

	! See if fpu was in use.  If it was, we need to save off the
	! floating point registers to the stack.
	rd	%fprs, %l0			! %l0 = cached copy of fprs
	btst	FPRS_FEF, %l0
	bz,pt	%icc, 4f
	  nop

	! save in-use fpregs on stack
	membar	#Sync				! make sure transactions to fp regs
						! have completed
	add	%fp, STACK_BIAS - 65, %l1	! get stack frame for fp regs
	and	%l1, -VIS_BLOCKSIZE, %l1	! block align frame
	stda	%d0, [%l1]ASI_BLK_P		! %l1 = addr of saved fp regs

	! enable fp
4:	membar	#StoreStore|#StoreLoad|#LoadStore
	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_P, %asi

	! load up FP registers with invalid TSB tag.
	fone	%d0			! ones in tag
	fzero	%d2			! zeros in TTE
	fone	%d4			! ones in tag
	fzero	%d6			! zeros in TTE
	fone	%d8			! ones in tag
	fzero	%d10			! zeros in TTE
	fone	%d12			! ones in tag
	fzero	%d14			! zeros in TTE
	ba,pt	%xcc, .sfmmu_inv_doblock
	  mov	(4*VIS_BLOCKSIZE), %i4	! we do 4 stda's each loop below

.sfmmu_inv_blkstart:
      ! stda	%d0, [%i0+192]%asi  ! in dly slot of branch that got us here
	stda	%d0, [%i0+128]%asi
	stda	%d0, [%i0+64]%asi
	stda	%d0, [%i0]%asi

	add	%i0, %i4, %i0
	sub	%i1, %i4, %i1

.sfmmu_inv_doblock:
	cmp	%i1, (4*VIS_BLOCKSIZE)	! check for completion
	bgeu,a	%icc, .sfmmu_inv_blkstart
	  stda	%d0, [%i0+192]%asi

.sfmmu_inv_finish:
	membar	#Sync
	btst	FPRS_FEF, %l0		! saved from above
	bz,a	.sfmmu_inv_finished
	  wr	%l0, 0, %fprs		! restore fprs

	! restore fpregs from stack
	ldda    [%l1]ASI_BLK_P, %d0
	membar	#Sync
	wr	%l0, 0, %fprs		! restore fprs

.sfmmu_inv_finished:
	! kpreempt_enable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	dec	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]
	ret
	  restore
	SET_SIZE(sfmmu_inv_tsb_fast)

#endif /* lint */

#if defined(lint)

/*
 * Prefetch "struct tsbe" while walking TSBs.
 * Prefetch 7 cache lines ahead of where we are now.
 * #n_reads is used since #one_read only applies to floating point reads,
 * and we are not doing floating point reads.  However, this has the
 * negative side effect of polluting the ecache.
 * The 448 comes from (7 * 64), which is how far ahead of our current
 * address we want to prefetch.
 */
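/*
 * (7 lines * 64 bytes per line = 448 bytes, i.e. 28 entries ahead assuming
 * a 16-byte struct tsbe.)
 */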
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}

/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}

#else /* lint */

	ENTRY(prefetch_tsbe_read)
	retl
	  prefetch	[%o0+448], #n_reads
	SET_SIZE(prefetch_tsbe_read)

	ENTRY(prefetch_tsbe_write)
	retl
	  prefetch	[%o0], #n_writes
	SET_SIZE(prefetch_tsbe_write)
#endif /* lint */

