/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the contents of the Sun reference MMU (sfmmu)
 * specific hat data structures and the sfmmu specific hat procedures.
 * The machine independent interface is described in <vm/hat.h>.
 */

#ifndef _VM_MACH_SFMMU_H
#define	_VM_MACH_SFMMU_H

#include <sys/x_call.h>
#include <sys/hypervisor_api.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Define UTSB_PHYS if the user TSB is always accessed via its physical
 * address.  On the sun4v platform, the user TSB is always accessed this way.
 */
#define	UTSB_PHYS	1

/*
 * Hypervisor TSB info
 */
#define	NHV_TSB_INFO	4

#ifndef _ASM

struct hv_tsb_block {
	uint64_t	hv_tsb_info_pa;	/* hypervisor TSB info PA */
	uint64_t	hv_tsb_info_cnt; /* hypervisor TSB info count */
	hv_tsb_info_t	hv_tsb_info[NHV_TSB_INFO]; /* hypervisor TSB info */
};

#endif /* _ASM */

#ifdef _ASM

/*
 * This macro is used to set the private/shared secondary context register
 * in sfmmu_alloc_ctx().
 * Input:
 * cnum     = context number to load into the register
 * is_shctx = sfmmu private/shared flag (0: private, 1: shared)
 * tmp2 is only used in the sun4u version of this macro
 */
#define	SET_SECCTX(cnum, is_shctx, tmp1, tmp2, label)			\
	mov	MMU_SCONTEXT, tmp1;					\
	movrnz	is_shctx, MMU_SCONTEXT1, tmp1;				\
	stxa    cnum, [tmp1]ASI_MMU_CTX;  /* set 2nd ctx reg. */	\
	membar  #Sync;							\

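/*
 * Illustrative (hypothetical) invocation from context allocation code,
 * with the new context number in %o0 and the shared flag in %o1:
 *
 *	SET_SECCTX(%o0, %o1, %g1, %g2, ctx_done)
 *
 * On sun4v the tmp2 and label arguments are unused by the body above;
 * they presumably exist so callers can share one call site with the
 * sun4u version of this macro.
 */
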
/*
 * This macro is used in the MMU code to check if TL should be lowered from
 * 2 to 1 to pop trapstat's state.  See the block comment in trapstat.c
 * for details.
 */

#define	TSTAT_CHECK_TL1(label, scr1, scr2)			\
	rdpr	%tpc, scr1;					\
	sethi	%hi(KERNELBASE), scr2;				\
	or	scr2, %lo(KERNELBASE), scr2;			\
	cmp	scr1, scr2;					\
	bgeu	%xcc, 9f;					\
	nop;							\
	wrpr	%g0, 1, %gl;					\
	ba	label;						\
	wrpr	%g0, 1, %tl;					\
9:
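
/*
 * Illustrative (hypothetical) use from a TL=2 handler, where "1:" is
 * the TL=1 re-entry point and %g6/%g7 are free scratch registers:
 *
 *	TSTAT_CHECK_TL1(1f, %g6, %g7)
 *
 * If %tpc holds an address below KERNELBASE, the macro lowers %gl and
 * %tl to 1 and branches to the label, popping trapstat's interposed
 * trap level (see trapstat.c).
 */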

/*
 * The following macros allow us to share the majority of the
 * SFMMU code between sun4u and sun4v platforms.
 */

#define	SETUP_TSB_ASI(qlp, tmp)

#define	SETUP_UTSB_ATOMIC_ASI(tmp1, tmp2)

/*
 * Macro to switch to the alternate global registers on sun4u platforms
 * (not applicable to sun4v platforms)
 */
#define	USE_ALTERNATE_GLOBALS(scr)

/*
 * Macro to set the %gl register value on sun4v platforms
 * (not applicable to sun4u platforms)
 */
#define	SET_GL_REG(val)						\
	wrpr	%g0, val, %gl
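
/*
 * Illustrative (hypothetical) use when a sun4v handler needs the
 * globals selected at trap level 1:
 *
 *	SET_GL_REG(1)
 */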

/*
 * Get pseudo-tagacc value and context from the MMU fault area.  Pseudo-tagacc
 * is the faulting virtual address OR'd with 0 for KCONTEXT, INVALID_CONTEXT
 * (1) for invalid context, and USER_CONTEXT (2) for user context.
 *
 * In:
 *   ptagacc, ctxtype = scratch registers
 * Out:
 *   ptagacc = MMU data tag access register value
 *   ctxtype = context type (KCONTEXT, INVALID_CONTEXT or USER_CONTEXT)
 */
#define	GET_MMU_D_PTAGACC_CTXTYPE(ptagacc, ctxtype)			\
	MMU_FAULT_STATUS_AREA(ctxtype);					\
	ldx	[ctxtype + MMFSA_D_ADDR], ptagacc;			\
	ldx	[ctxtype + MMFSA_D_CTX], ctxtype;			\
	srlx	ptagacc, MMU_PAGESHIFT, ptagacc; /* align to page boundary */ \
	cmp	ctxtype, USER_CONTEXT_TYPE;				\
	sllx	ptagacc, MMU_PAGESHIFT, ptagacc;			\
	movgu	%icc, USER_CONTEXT_TYPE, ctxtype;			\
	or	ptagacc, ctxtype, ptagacc
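
/*
 * Worked example (hypothetical values, assuming MMU_PAGESHIFT = 13):
 * for a user-mode data miss at va 0x2a000, the fault context is greater
 * than USER_CONTEXT_TYPE and is clamped to USER_CONTEXT_TYPE (2), so
 *
 *	ptagacc = 0x2a000 | 2 = 0x2a002
 *
 * The srlx/sllx pair clears the low MMU_PAGESHIFT bits of the va, which
 * makes them free to carry the context type.
 */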

/*
 * Synthesize/get data tag access register value from the MMU fault area
 *
 * In:
 *   tagacc, scr1 = scratch registers
 * Out:
 *   tagacc = MMU data tag access register value
 */
#define	GET_MMU_D_TAGACC(tagacc, scr1)				\
	GET_MMU_D_PTAGACC_CTXTYPE(tagacc, scr1)

/*
 * Synthesize/get data tag target register value from the MMU fault area
 *
 * In:
 *   ttarget, scr1 = scratch registers
 * Out:
 *   ttarget = MMU data tag target register value
 */
#define	GET_MMU_D_TTARGET(ttarget, scr1)			\
	MMU_FAULT_STATUS_AREA(ttarget);				\
	ldx	[ttarget + MMFSA_D_CTX], scr1;			\
	sllx	scr1, TTARGET_CTX_SHIFT, scr1;			\
	ldx	[ttarget + MMFSA_D_ADDR], ttarget;		\
	srlx	ttarget, TTARGET_VA_SHIFT, ttarget;		\
	or	ttarget, scr1, ttarget
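
/*
 * Sketch of the resulting layout (symbolic, per the shifts above):
 *
 *	ttarget = (MMFSA_D_CTX << TTARGET_CTX_SHIFT) |
 *	    (MMFSA_D_ADDR >> TTARGET_VA_SHIFT)
 *
 * i.e. the context in the high bits and the VA's tag bits in the low
 * bits, mirroring the tag format compared against TSB entries.
 */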

/*
 * Synthesize/get data/instruction pseudo tag access register values
 * from the MMU fault area (context is 0 for kernel, 1 for invalid, 2 for user)
 *
 * In:
 *   dtagacc, itagacc, scr1, scr2 = scratch registers
 * Out:
 *   dtagacc = MMU data tag access register value w/pseudo-context
 *   itagacc = MMU instruction tag access register value w/pseudo-context
 */
#define	GET_MMU_BOTH_TAGACC(dtagacc, itagacc, scr1, scr2)	\
	MMU_FAULT_STATUS_AREA(scr1);				\
	ldx	[scr1 + MMFSA_D_ADDR], scr2;			\
	ldx	[scr1 + MMFSA_D_CTX], dtagacc;			\
	srlx	scr2, MMU_PAGESHIFT, scr2;	/* align to page boundary */ \
	cmp	dtagacc, USER_CONTEXT_TYPE;			\
	sllx	scr2, MMU_PAGESHIFT, scr2;			\
	movgu	%icc, USER_CONTEXT_TYPE, dtagacc;		\
	or	scr2, dtagacc, dtagacc;				\
	ldx	[scr1 + MMFSA_I_ADDR], scr2;			\
	ldx	[scr1 + MMFSA_I_CTX], itagacc;			\
	srlx	scr2, MMU_PAGESHIFT, scr2;	/* align to page boundary */ \
	cmp	itagacc, USER_CONTEXT_TYPE;			\
	sllx	scr2, MMU_PAGESHIFT, scr2;			\
	movgu	%icc, USER_CONTEXT_TYPE, itagacc;		\
	or	scr2, itagacc, itagacc
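
/*
 * Illustrative (hypothetical) invocation, assuming %g1-%g4 are free:
 *
 *	GET_MMU_BOTH_TAGACC(%g1, %g2, %g3, %g4)
 *
 * leaves the data pseudo-tagacc in %g1 and the instruction
 * pseudo-tagacc in %g2.
 */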

/*
 * Synthesize/get MMU data fault address from the MMU fault area
 *
 * In:
 *   daddr, scr1 = scratch registers
 * Out:
 *   daddr = MMU data fault address
 */
#define	GET_MMU_D_ADDR(daddr, scr1)				\
	MMU_FAULT_STATUS_AREA(scr1);				\
	ldx	[scr1 + MMFSA_D_ADDR], daddr

/*
 * Get pseudo-tagacc value and context from the MMU fault area.  Pseudo-tagacc
 * is the faulting virtual address OR'd with 0 for KCONTEXT, INVALID_CONTEXT
 * (1) for invalid context, and USER_CONTEXT (2) for user context.
 *
 * In:
 *   ptagacc, ctxtype = scratch registers
 * Out:
 *   ptagacc = MMU instruction tag access register value
 *   ctxtype = context type (KCONTEXT, INVALID_CONTEXT or USER_CONTEXT)
 */
#define	GET_MMU_I_PTAGACC_CTXTYPE(ptagacc, ctxtype)			\
	MMU_FAULT_STATUS_AREA(ctxtype);					\
	ldx	[ctxtype + MMFSA_I_ADDR], ptagacc;			\
	ldx	[ctxtype + MMFSA_I_CTX], ctxtype;			\
	srlx	ptagacc, MMU_PAGESHIFT, ptagacc; /* align to page boundary */ \
	cmp	ctxtype, USER_CONTEXT_TYPE;				\
	sllx	ptagacc, MMU_PAGESHIFT, ptagacc;			\
	movgu	%icc, USER_CONTEXT_TYPE, ctxtype;			\
	or	ptagacc, ctxtype, ptagacc

/*
 * Load ITLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers
 */
#define	ITLB_STUFF(tte, scr1, scr2, scr3, scr4)		\
	mov	%o0, scr1;				\
	mov	%o1, scr2;				\
	mov	%o2, scr3;				\
	mov	%o3, scr4;				\
	MMU_FAULT_STATUS_AREA(%o2);			\
	ldx	[%o2 + MMFSA_I_ADDR], %o0;		\
	ldx	[%o2 + MMFSA_I_CTX], %o1;		\
	mov	tte, %o2;				\
	mov	MAP_ITLB, %o3;				\
	ta	MMU_MAP_ADDR;				\
	/* BEGIN CSTYLED */				\
	brnz,a,pn %o0, ptl1_panic;			\
	  mov	PTL1_BAD_HCALL, %g1;			\
	/* END CSTYLED */				\
	mov	scr1, %o0;				\
	mov	scr2, %o1;				\
	mov	scr3, %o2;				\
	mov	scr4, %o3

/*
 * Load DTLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers
 */
#define	DTLB_STUFF(tte, scr1, scr2, scr3, scr4)		\
	mov	%o0, scr1;				\
	mov	%o1, scr2;				\
	mov	%o2, scr3;				\
	mov	%o3, scr4;				\
	MMU_FAULT_STATUS_AREA(%o2);			\
	ldx	[%o2 + MMFSA_D_ADDR], %o0;		\
	ldx	[%o2 + MMFSA_D_CTX], %o1;		\
	mov	tte, %o2;				\
	mov	MAP_DTLB, %o3;				\
	ta	MMU_MAP_ADDR;				\
	/* BEGIN CSTYLED */				\
	brnz,a,pn %o0, ptl1_panic;			\
	  mov	PTL1_BAD_HCALL, %g1;			\
	/* END CSTYLED */				\
	mov	scr1, %o0;				\
	mov	scr2, %o1;				\
	mov	scr3, %o2;				\
	mov	scr4, %o3
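
/*
 * Design note: MMU_MAP_ADDR is a hypervisor fast trap taking
 * vaddr/ctx/tte/flags in %o0-%o3 and returning a status in %o0, which
 * is why both macros save and restore those registers around the "ta"
 * and panic on a nonzero status.  A typical (hypothetical) use after a
 * TSB hit with the TTE in %g5:
 *
 *	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
 *	retry
 */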

/*
 * Returns PFN given the TTE and vaddr
 *
 * In:
 *   tte = reg containing tte
 *   vaddr = reg containing vaddr
 *   scr1, scr2, scr3 = scratch registers
 * Out:
 *   tte = PFN value
 */
#define	TTETOPFN(tte, vaddr, label, scr1, scr2, scr3)			\
	and	tte, TTE_SZ_BITS, scr1;		/* scr1 = ttesz */	\
	sllx	tte, TTE_PA_LSHIFT, tte;				\
	sllx	scr1, 1, scr2;						\
	add	scr2, scr1, scr2;		/* mulx 3 */		\
	add	scr2, MMU_PAGESHIFT + TTE_PA_LSHIFT, scr3;		\
	/* CSTYLED */							\
	brz,pt	scr2, label/**/1;					\
	srlx	tte, scr3, tte;						\
	sllx	tte, scr2, tte;						\
	set	1, scr1;						\
	add	scr2, MMU_PAGESHIFT, scr3;				\
	sllx	scr1, scr3, scr1;					\
	sub	scr1, 1, scr1;	/* scr1=TTE_PAGE_OFFSET(ttesz) */	\
	and	vaddr, scr1, scr2;					\
	srln	scr2, MMU_PAGESHIFT, scr2;				\
	or	tte, scr2, tte;						\
	/* CSTYLED */							\
label/**/1:
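
/*
 * Worked example (hypothetical, assuming an 8K base page, i.e.
 * MMU_PAGESHIFT = 13): for a 64K TTE, ttesz = 1 and scr2 = 3, so the
 * macro keeps the TTE's PA bits above PFN bit 3 and takes the low 3 PFN
 * bits from vaddr bits <15:13>, the 8K-page index within the 64K
 * mapping.  For an 8K TTE, scr2 = 0 and the branch skips the fix-up.
 */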

/*
 * Support for non-coherent I$.
 *
 * In sun4v we use tte bit 3 as a software flag indicating whether
 * execute permission is given. IMMU miss traps cause the real execute
 * permission to be set. sfmmu_ttesync() will see if execute permission
 * has been set, and then set P_EXEC in page_t. This causes an I-cache
 * flush when the page is freed.
 *
 * However, the hypervisor reserves bit 3 as part of a 4-bit page size.
 * We allow this flag to be set in the hme TTE, but never in the TSB or TLB.
 */
#define	TTE_CLR_SOFTEXEC_ML(tte)	bclr TTE_SOFTEXEC_INT, tte
#define	TTE_CHK_SOFTEXEC_ML(tte)	andcc tte, TTE_SOFTEXEC_INT, %g0

/*
 * TTE_SET_EXEC_ML is a macro that updates the exec bit if it is
 * not already set; it also sets the reference bit at the same time.
 *
 * Caller must check EXECPRM. Do not call if it is already set in the tte.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tmp1     = tmp reg
 * label    = temporary label
 */

#define	TTE_SET_EXEC_ML(tte, ttepa, tmp1, label)			\
	/* BEGIN CSTYLED */						\
	/* update execprm bit */					\
label/**/1:								\
	or	tte, (TTE_EXECPRM_INT | TTE_REF_INT), tmp1;		\
	casxa	[ttepa]ASI_MEM, tte, tmp1; 	/* update bits */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	  mov	tmp1, tte;						\
	or	tte, (TTE_EXECPRM_INT | TTE_REF_INT), tte;		\
	/* END CSTYLED */


/*
 * TTE_SET_REF_ML is a macro that updates the reference bit if it is
 * not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 */

#define	TTE_SET_REF_ML(tte, ttepa, tteva, tsbarea, tmp1, label)		\
	/* BEGIN CSTYLED */						\
	/* check reference bit */					\
	btst	TTE_REF_INT, tte;					\
	bnz,pt	%xcc, label/**/2;	/* if ref bit set-skip ahead */	\
	nop;								\
	/* update reference bit */					\
label/**/1:								\
	or	tte, TTE_REF_INT, tmp1;					\
	casxa	[ttepa]ASI_MEM, tte, tmp1; 	/* update ref bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_REF_INT, tte;					\
label/**/2:								\
	/* END CSTYLED */
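
/*
 * Illustrative (hypothetical) call from a TSB miss path, with the TTE
 * value in %g3 and its physical address in %g4:
 *
 *	TTE_SET_REF_ML(%g3, %g4, %g5, %g6, %g7, dmiss_ref)
 *
 * The casxa loop retries with the freshly read TTE until the ref bit
 * sticks.  Note that tteva and tsbarea are not referenced by the sun4v
 * body; they appear to be retained for interface parity with sun4u.
 */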


/*
 * TTE_SET_REFMOD_ML is a macro that updates the reference and modify bits
 * if they are not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 * exitlabel = label to jump to if the write perm bit is not set.
 */

#define	TTE_SET_REFMOD_ML(tte, ttepa, tteva, tsbarea, tmp1, label,	\
	exitlabel)							\
	/* BEGIN CSTYLED */						\
	/* check write permission */					\
	btst	TTE_WRPRM_INT, tte;					\
	bz,pn	%xcc, exitlabel;	/* exit if wr_perm not set */	\
	  btst	TTE_HWWR_INT, tte;					\
	bnz,pn	%xcc, label/**/2;	/* nothing to do */		\
	  nop;								\
	/* update ref/mod bits */					\
label/**/1:								\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tmp1;			\
	casxa	[ttepa]ASI_MEM, tte, tmp1; /* update ref/mod bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tte;			\
label/**/2:								\
	/* END CSTYLED */

/*
 * Get the TSB base register for shared contexts from the tsbmiss area
 *
 * In:
 *   tsbmiss = pointer to tsbmiss area
 *   tsbmissoffset = offset to the right tsb pointer
 *   tsbreg = scratch
 * Out:
 *   tsbreg = tsbreg read from the specified tsbmiss offset
 */
#define	GET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg)		\
	ldx	[tsbmiss + tsbmissoffset], tsbreg


/*
 * Get the location of the TSB entry in the first TSB to probe
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe, tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 1st TSB
 */

#define	GET_1ST_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	/* BEGIN CSTYLED */						\
	mov	SCRATCHPAD_UTSBREG1, tmp1				;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tsbe	/* get tsbreg */	;\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2	/* tmp2=szc */		;\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe	/* tsbbase */		;\
	mov	TSB_ENTRIES(0), tmp1	/* nentries in TSB size 0 */	;\
	sllx	tmp1, tmp2, tmp1	/* tmp1 = nentries in TSB */	;\
	sub	tmp1, 1, tmp1		/* mask = nentries - 1 */	;\
	srlx	tagacc, MMU_PAGESHIFT, tmp2				;\
	and	tmp2, tmp1, tmp1	/* tsbent = virtpage & mask */	;\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1	/* entry num --> ptr */	;\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */ ;\
	/* END CSTYLED */
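
/*
 * Sketch of the index computation above (symbolic), with size code szc
 * taken from the scratchpad register:
 *
 *	mask  = (TSB_ENTRIES(0) << szc) - 1
 *	entry = (tagacc >> MMU_PAGESHIFT) & mask
 *	tsbe  = tsbbase + (entry << TSB_ENTRY_SHIFT)
 *
 * i.e. the virtual page number indexes a power-of-two array of TSB
 * entries.
 */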


/*
 * Will probe the first TSB, and if it finds a match, will insert it
 * into the TLB and retry.
 *
 * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
 * vpg_4m = 4M virtual page number for tag matching  (in, ro)
 * label = where to branch to if this is a miss (text)
 * %asi = atomic ASI to use for the TSB access
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = location tag will be retrieved into from TSB (out)
 * g5 = location data(tte) will be retrieved into from TSB (out)
 */
#define	PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label)	/* g4/g5 clobbered */	\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */
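
/*
 * Note on the brgez test: a valid TTE has its valid bit at bit 63, so
 * branching when %g5 >= 0 treats an invalid entry as a miss.  A typical
 * (hypothetical) probe with the entry pointer in %g1 and the tag-target
 * value in %g7:
 *
 *	PROBE_1ST_DTSB(%g1, %g7, udtlb_miss)
 */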


/*
 * Same as above, only if the TTE doesn't have the execute
 * bit set, will branch to exec_fault directly.
 */
#define	PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */

/*
 * vpg_4m = 4M virtual page number for tag matching (in)
 * tsbe_ptr = precomputed second TSB entry pointer (in)
 * label = label to use to make branch targets unique (text)
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = tag portion of TSBE (out)
 * g5 = data portion of TSBE (out)
 */
#define	PROBE_2ND_DTSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4  /* g4 = tag, g5 = data */ ;\
	/* since we are looking at 2nd tsb, if it's valid, it must be 4M */ ;\
	cmp	%g4, vpg_4m						;\
	bne,pn	%xcc, label/**/1					;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	mov	tsbe_ptr, %g1		/* trace_tsbhit wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */


/*
 * Same as above, with the following additions:
 * If the TTE found is not executable, branch directly
 * to exec_fault.  If a TSB miss, branch to TSB miss handler.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, sfmmu_tsb_miss_tt					;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */

/*
 * 1. Get ctx1.  The trap type is supplied by the caller.
 * 2. If it is an iTSB miss, store ctx1 in MMFSA_I_CTX.
 * 3. If it is a dTSB miss, store ctx1 in MMFSA_D_CTX.
 * 4. Thus the subsequent [D|I]TLB_STUFF will work as expected.
 */
#define	SAVE_CTX1(traptype, ctx1, tmp, label)				\
	/* BEGIN CSTYLED */						\
	mov	MMU_SCONTEXT1, tmp					;\
	ldxa	[tmp]ASI_MMU_CTX, ctx1					;\
	MMU_FAULT_STATUS_AREA(tmp)					;\
	cmp     traptype, FAST_IMMU_MISS_TT				;\
	be,a,pn %icc, label						;\
	  stx	ctx1, [tmp + MMFSA_I_CTX]				;\
	cmp     traptype, T_INSTR_MMU_MISS				;\
	be,a,pn %icc, label						;\
	  stx	ctx1, [tmp + MMFSA_I_CTX]				;\
	stx	ctx1, [tmp + MMFSA_D_CTX]				;\
label:
	/* END CSTYLED */
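
/*
 * Illustrative (hypothetical) call from a TL>0 miss handler with the
 * trap type in %g3 (the label argument is defined by the macro itself):
 *
 *	SAVE_CTX1(%g3, %g6, %g7, save_done)
 *
 * Storing the shared context into the fault area keeps the arguments
 * that ITLB_STUFF/DTLB_STUFF later read from the MMFSA consistent with
 * ctx1.
 */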

#endif /* _ASM */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_MACH_SFMMU_H */