xref: /illumos-gate/usr/src/uts/sun4v/vm/mach_sfmmu.h (revision 8548bf79039833dba8615afdf63258b2cb122121)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * VM - Hardware Address Translation management.
28  *
29  * This file describes the contents of the sun reference mmu (sfmmu)
30  * specific hat data structures and the sfmmu specific hat procedures.
31  * The machine independent interface is described in <vm/hat.h>.
32  */
33 
34 #ifndef _VM_MACH_SFMMU_H
35 #define	_VM_MACH_SFMMU_H
36 
37 #pragma ident	"%Z%%M%	%I%	%E% SMI"
38 
39 #include <sys/x_call.h>
40 #include <sys/hypervisor_api.h>
41 
42 #ifdef	__cplusplus
43 extern "C" {
44 #endif
45 
/*
 * Define UTSB_PHYS if user TSB is always accessed via physical address.
 * On sun4v platform, user TSB is accessed via physical address.
 */
#define	UTSB_PHYS	1

/*
 * Hypervisor TSB info
 *
 * Number of hv_tsb_info_t entries kept per hv_tsb_block (see below).
 */
#define	NHV_TSB_INFO	2
56 
57 #ifndef _ASM
58 
/*
 * Hypervisor TSB block: the physical address and count of the
 * hv_tsb_info_t entries that describe this context's TSBs, together
 * with the entries themselves.
 */
struct hv_tsb_block {
	uint64_t	hv_tsb_info_pa;	/* hypervisor TSB info PA */
	uint64_t	hv_tsb_info_cnt; /* hypervisor TSB info count */
	hv_tsb_info_t	hv_tsb_info[NHV_TSB_INFO]; /* hypervisor TSB info */
};
64 
65 #endif /* _ASM */
66 
67 #ifdef _ASM
68 
/*
 * This macro is used in the MMU code to check if TL should be lowered from
 * 2 to 1 to pop trapstat's state.  See the block comment in trapstat.c
 * for details.
 *
 * If %tpc is at or above KERNELBASE, fall through (branch to the local
 * label 9:).  Otherwise set %gl to 1 and branch to 'label'; the wrpr
 * that sets %tl to 1 executes in the branch delay slot.
 */

#define	TSTAT_CHECK_TL1(label, scr1, scr2)			\
	rdpr	%tpc, scr1;					\
	sethi	%hi(KERNELBASE), scr2;				\
	or	scr2, %lo(KERNELBASE), scr2; 			\
	cmp	scr1, scr2; 					\
	bgeu	%xcc, 9f;					\
	nop;							\
	wrpr	%g0, 1, %gl;					\
	ba	label;						\
	wrpr	%g0, 1, %tl;					\
9:
86 
/*
 * The following macros allow us to share majority of the
 * SFMMU code between sun4u and sun4v platforms.
 */

/*
 * These two expand to nothing on sun4v (user TSBs are always accessed
 * via physical address here; see UTSB_PHYS above).  They exist so that
 * shared sfmmu assembly can invoke them unconditionally.
 */
#define	SETUP_TSB_ASI(qlp, tmp)

#define	SETUP_UTSB_ATOMIC_ASI(tmp1, tmp2)
95 
/*
 * Macro to switch to alternate global register on sun4u platforms
 * (not applicable to sun4v platforms; expands to nothing here)
 */
#define	USE_ALTERNATE_GLOBALS(scr)
101 
/*
 * Macro to set %gl register value on sun4v platforms
 * (not applicable to sun4u platforms)
 */
#define	SET_GL_REG(val)						\
	wrpr	%g0, val, %gl
108 
/*
 * Get pseudo-tagacc value and context from the MMU fault area.  Pseudo-tagacc
 * is the faulting virtual address OR'd with 0 for KCONTEXT, INVALID_CONTEXT
 * (1) for invalid context, and USER_CONTEXT (2) for user context.
 *
 * In:
 *   ptagacc, ctxtype = scratch registers
 * Out:
 *   ptagacc = MMU data tag access register value (page-aligned fault VA
 *             OR'd with the context type)
 *   ctxtype = context type (KCONTEXT, INVALID_CONTEXT or USER_CONTEXT);
 *             any context greater than USER_CONTEXT_TYPE is clamped to
 *             USER_CONTEXT_TYPE by the movgu
 */
#define	GET_MMU_D_PTAGACC_CTXTYPE(ptagacc, ctxtype)			\
	MMU_FAULT_STATUS_AREA(ctxtype);					\
	ldx	[ctxtype + MMFSA_D_ADDR], ptagacc;			\
	ldx	[ctxtype + MMFSA_D_CTX], ctxtype;			\
	srlx	ptagacc, MMU_PAGESHIFT, ptagacc; /* align to page boundary */ \
	cmp	ctxtype, USER_CONTEXT_TYPE;				\
	sllx	ptagacc, MMU_PAGESHIFT, ptagacc;			\
	movgu	%icc, USER_CONTEXT_TYPE, ctxtype;			\
	or	ptagacc, ctxtype, ptagacc
129 
/*
 * Synthesize/get data tag access register value from the MMU fault area.
 * Thin wrapper around GET_MMU_D_PTAGACC_CTXTYPE; the context type is
 * left in scr1 but callers of this macro only use tagacc.
 *
 * In:
 *   tagacc, scr1 = scratch registers
 * Out:
 *   tagacc = MMU data tag access register value
 */
#define	GET_MMU_D_TAGACC(tagacc, scr1)				\
	GET_MMU_D_PTAGACC_CTXTYPE(tagacc, scr1)
140 
/*
 * Synthesize/get data tag target register value from the MMU fault area:
 * context in bits [63:TTARGET_CTX_SHIFT], fault VA page number shifted
 * down by TTARGET_VA_SHIFT in the low bits.
 *
 * In:
 *   ttarget, scr1 = scratch registers
 * Out:
 *   ttarget = MMU data tag target register value
 */
#define	GET_MMU_D_TTARGET(ttarget, scr1)			\
	MMU_FAULT_STATUS_AREA(ttarget);				\
	ldx	[ttarget + MMFSA_D_CTX], scr1;			\
	sllx	scr1, TTARGET_CTX_SHIFT, scr1;			\
	ldx	[ttarget + MMFSA_D_ADDR], ttarget;		\
	srlx	ttarget, TTARGET_VA_SHIFT, ttarget;		\
	or	ttarget, scr1, ttarget
156 
/*
 * Synthesize/get data/instruction pseudo tag access register values
 * from the MMU fault area (context is 0 for kernel, 1 for invalid, 2 for user)
 *
 * In:
 *   dtagacc, itagacc, scr1, scr2 = scratch registers
 * Out:
 *   dtagacc = MMU data tag access register value w/pseudo-context
 *   itagacc = MMU instruction tag access register value w/pseudo-context
 */
#define	GET_MMU_BOTH_TAGACC(dtagacc, itagacc, scr1, scr2)	\
	MMU_FAULT_STATUS_AREA(scr1);				\
	ldx	[scr1 + MMFSA_D_ADDR], scr2;			\
	ldx	[scr1 + MMFSA_D_CTX], dtagacc;			\
	srlx	scr2, MMU_PAGESHIFT, scr2;	/* align to page boundary */ \
	cmp	dtagacc, USER_CONTEXT_TYPE;			\
	sllx	scr2, MMU_PAGESHIFT, scr2;			\
	movgu	%icc, USER_CONTEXT_TYPE, dtagacc;		\
	or	scr2, dtagacc, dtagacc;				\
	ldx	[scr1 + MMFSA_I_ADDR], scr2;			\
	ldx	[scr1 + MMFSA_I_CTX], itagacc;			\
	srlx	scr2, MMU_PAGESHIFT, scr2;	/* align to page boundary */ \
	cmp	itagacc, USER_CONTEXT_TYPE;			\
	sllx	scr2, MMU_PAGESHIFT, scr2;			\
	movgu	%icc, USER_CONTEXT_TYPE, itagacc;		\
	or	scr2, itagacc, itagacc
183 
/*
 * Synthesize/get MMU data fault address from the MMU fault area
 *
 * In:
 *   daddr, scr1 = scratch registers
 * Out:
 *   daddr = MMU data fault address
 */
#define	GET_MMU_D_ADDR(daddr, scr1)				\
	MMU_FAULT_STATUS_AREA(scr1);				\
	ldx	[scr1 + MMFSA_D_ADDR], daddr
195 
/*
 * Get pseudo-tagacc value and context from the MMU fault area.  Pseudo-tagacc
 * is the faulting virtual address OR'd with 0 for KCONTEXT, INVALID_CONTEXT
 * (1) for invalid context, and USER_CONTEXT (2) for user context.
 * Instruction-fault analogue of GET_MMU_D_PTAGACC_CTXTYPE.
 *
 * In:
 *   ptagacc, ctxtype = scratch registers
 * Out:
 *   ptagacc = MMU instruction tag access register value (page-aligned
 *             fault VA OR'd with the context type)
 *   ctxtype = context type (KCONTEXT, INVALID_CONTEXT or USER_CONTEXT)
 */
#define	GET_MMU_I_PTAGACC_CTXTYPE(ptagacc, ctxtype)			\
	MMU_FAULT_STATUS_AREA(ctxtype);					\
	ldx	[ctxtype + MMFSA_I_ADDR], ptagacc;			\
	ldx	[ctxtype + MMFSA_I_CTX], ctxtype;			\
	srlx	ptagacc, MMU_PAGESHIFT, ptagacc; /* align to page boundary */ \
	cmp	ctxtype, USER_CONTEXT_TYPE;				\
	sllx	ptagacc, MMU_PAGESHIFT, ptagacc;			\
	movgu	%icc, USER_CONTEXT_TYPE, ctxtype;			\
	or	ptagacc, ctxtype, ptagacc
216 
/*
 * Load ITLB entry
 *
 * Traps to the hypervisor (ta MMU_MAP_ADDR) with the faulting VA and
 * context read from the MMU fault status area.  The hcall arguments
 * live in %o0-%o3, so those registers are preserved in the scratch
 * registers across the call.  A non-zero hcall status is fatal:
 * ptl1_panic(PTL1_BAD_HCALL), which clobbers %g1 on that path.
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers
 */
#define	ITLB_STUFF(tte, scr1, scr2, scr3, scr4)		\
	mov	%o0, scr1;				\
	mov	%o1, scr2;				\
	mov	%o2, scr3;				\
	mov	%o3, scr4;				\
	MMU_FAULT_STATUS_AREA(%o2);			\
	ldx	[%o2 + MMFSA_I_ADDR], %o0;		\
	ldx	[%o2 + MMFSA_I_CTX], %o1;		\
	mov	tte, %o2;				\
	mov	MAP_ITLB, %o3;				\
	ta	MMU_MAP_ADDR;				\
	/* BEGIN CSTYLED */				\
	brnz,a,pn %o0, ptl1_panic;			\
	  mov	PTL1_BAD_HCALL, %g1;			\
	/* END CSTYLED */				\
	mov	scr1, %o0;				\
	mov	scr2, %o1;				\
	mov	scr3, %o2;				\
	mov	scr4, %o3
243 
/*
 * Load DTLB entry
 *
 * Same structure as ITLB_STUFF, but uses the data-fault VA/context
 * (MMFSA_D_ADDR/MMFSA_D_CTX) and MAP_DTLB.  %o0-%o3 are preserved in
 * the scratch registers across the hypervisor call; a non-zero hcall
 * status triggers ptl1_panic(PTL1_BAD_HCALL) (clobbers %g1 then).
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers
 */
#define	DTLB_STUFF(tte, scr1, scr2, scr3, scr4)		\
	mov	%o0, scr1;				\
	mov	%o1, scr2;				\
	mov	%o2, scr3;				\
	mov	%o3, scr4;				\
	MMU_FAULT_STATUS_AREA(%o2);			\
	ldx	[%o2 + MMFSA_D_ADDR], %o0;		\
	ldx	[%o2 + MMFSA_D_CTX], %o1;		\
	mov	tte, %o2;				\
	mov	MAP_DTLB, %o3;				\
	ta	MMU_MAP_ADDR;				\
	/* BEGIN CSTYLED */				\
	brnz,a,pn %o0, ptl1_panic;			\
	  mov	PTL1_BAD_HCALL, %g1;			\
	/* END CSTYLED */				\
	mov	scr1, %o0;				\
	mov	scr2, %o1;				\
	mov	scr3, %o2;				\
	mov	scr4, %o3
270 
/*
 * Returns PFN given the TTE and vaddr
 *
 * In:
 *   tte = reg containing tte
 *   vaddr = reg containing vaddr
 *   label = temporary label base (label1 is emitted)
 *   scr1, scr2, scr3 = scratch registers
 * Out:
 *   tte = PFN value
 *
 * For an 8K TTE (ttesz == 0) the brz takes the fast path: the PA field
 * shifted down to a PFN is the answer.  For larger page sizes the PFN's
 * low bits are filled in from the page-offset bits of vaddr (scr2 holds
 * 3 * ttesz, the extra shift for that page size).
 */
#define	TTETOPFN(tte, vaddr, label, scr1, scr2, scr3)			\
	and	tte, TTE_SZ_BITS, scr1;		/* scr1 = ttesz */	\
	sllx	tte, TTE_PA_LSHIFT, tte;				\
	sllx	scr1, 1, scr2;						\
	add	scr2, scr1, scr2;		/* mulx 3 */		\
	add	scr2, MMU_PAGESHIFT + TTE_PA_LSHIFT, scr3;		\
	/* CSTYLED */							\
	brz,pt	scr2, label/**/1;					\
	srlx	tte, scr3, tte;						\
	sllx	tte, scr2, tte;						\
	set	1, scr1;						\
	add	scr2, MMU_PAGESHIFT, scr3;				\
	sllx	scr1, scr3, scr1;					\
	sub	scr1, 1, scr1;	/* scr1=TTE_PAGE_OFFSET(ttesz) */	\
	and	vaddr, scr1, scr2;					\
	srln	scr2, MMU_PAGESHIFT, scr2;				\
	or	tte, scr2, tte;						\
	/* CSTYLED */							\
label/**/1:
300 
/*
 * TTE_SET_REF_ML is a macro that updates the reference bit if it is
 * not already set.  The update is done atomically with casxa through
 * the TTE's physical address, retrying until the compare-and-swap
 * succeeds; on exit 'tte' holds the TTE value with TTE_REF_INT set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte (unused here; kept for sun4u parity)
 * tsbarea  = tsb miss area (unused here; kept for sun4u parity)
 * tmp1     = tmp reg
 * label    = temporary label
 */

#define	TTE_SET_REF_ML(tte, ttepa, tteva, tsbarea, tmp1, label)		\
	/* BEGIN CSTYLED */						\
	/* check reference bit */					\
	btst	TTE_REF_INT, tte;					\
	bnz,pt	%xcc, label/**/2;	/* if ref bit set-skip ahead */	\
	nop;								\
	/* update reference bit */					\
label/**/1:								\
	or	tte, TTE_REF_INT, tmp1;					\
	casxa	[ttepa]ASI_MEM, tte, tmp1; 	/* update ref bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_REF_INT, tte;					\
label/**/2:								\
	/* END CSTYLED */
330 
331 
/*
 * TTE_SET_REFMOD_ML is a macro that updates the reference and modify bits
 * if not already set.  Like TTE_SET_REF_ML, the update is an atomic
 * casxa retry loop through the TTE's physical address; on exit 'tte'
 * holds the TTE value with TTE_HWWR_INT | TTE_REF_INT set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte (unused here; kept for sun4u parity)
 * tsbarea  = tsb miss area (unused here; kept for sun4u parity)
 * tmp1     = tmp reg
 * label    = temporary label
 * exitlabel = label where to jump to if write perm bit not set.
 */

#define	TTE_SET_REFMOD_ML(tte, ttepa, tteva, tsbarea, tmp1, label,	\
	exitlabel)							\
	/* BEGIN CSTYLED */						\
	/* check reference bit */					\
	btst	TTE_WRPRM_INT, tte;					\
	bz,pn	%xcc, exitlabel;	/* exit if wr_perm not set */	\
	  btst	TTE_HWWR_INT, tte;					\
	bnz,pn	%xcc, label/**/2;	/* nothing to do */		\
	  nop;								\
	/* update reference bit */					\
label/**/1:								\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tmp1;			\
	casxa	[ttepa]ASI_MEM, tte, tmp1; /* update ref/mod bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tte;			\
label/**/2:								\
	/* END CSTYLED */
365 
366 
/*
 * Synthesize a TSB base register contents for a process: the TSB's
 * physical base address OR'd with its size code (low TSB_SOFTSZ_MASK
 * bits).
 *
 * In:
 *   tsbinfo = TSB info pointer (ro)
 *   tsbreg, tmp1 = scratch registers
 * Out:
 *   tsbreg = value to program into TSB base register
 */

#define	MAKE_UTSBREG(tsbinfo, tsbreg, tmp1)			\
	ldx	[tsbinfo + TSBINFO_PADDR], tsbreg;		\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1;		\
	and	tmp1, TSB_SOFTSZ_MASK, tmp1;			\
	or	tsbreg, tmp1, tsbreg
382 
383 
/*
 * Load TSB base register into a dedicated scratchpad register.
 * This register contains utsb_pabase in bits 63:13, and TSB size
 * code in bits 2:0.
 *
 * In:
 *   tsbreg = value to load (ro)
 *   regnum = constant or register selecting the scratchpad register
 *   tmp1 = scratch register
 * Out:
 *   Specified scratchpad register updated
 *
 */
#define	SET_UTSBREG(regnum, tsbreg, tmp1)				\
	mov	regnum, tmp1;						\
	stxa	tsbreg, [tmp1]ASI_SCRATCHPAD	/* save tsbreg */
400 
/*
 * Get TSB base register from the scratchpad
 *
 * In:
 *   regnum = constant or register selecting the scratchpad register
 *   tsbreg = scratch
 * Out:
 *   tsbreg = tsbreg from the specified scratchpad register
 */
#define	GET_UTSBREG(regnum, tsbreg)					\
	mov	regnum, tsbreg;						\
	ldxa	[tsbreg]ASI_SCRATCHPAD, tsbreg
413 
414 
/*
 * Get the location of the TSB entry in the first TSB to probe
 *
 * Reads the first-TSB base/size-code word from SCRATCHPAD_UTSBREG1,
 * then indexes it by (fault page number & (nentries - 1)).
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe, tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 1st TSB
 */

#define	GET_1ST_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	/* BEGIN CSTYLED */						\
	mov	SCRATCHPAD_UTSBREG1, tmp1				;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tsbe	/* get tsbreg */	;\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2	/* tmp2=szc */		;\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe	/* tsbbase */		;\
	mov	TSB_ENTRIES(0), tmp1	/* nentries in TSB size 0 */	;\
	sllx	tmp1, tmp2, tmp1	/* tmp1 = nentries in TSB */	;\
	sub	tmp1, 1, tmp1		/* mask = nentries - 1 */	;\
	srlx	tagacc, MMU_PAGESHIFT, tmp2 				;\
	and	tmp2, tmp1, tmp1	/* tsbent = virtpage & mask */	;\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1	/* entry num --> ptr */	;\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */ ;\
	/* END CSTYLED */
439 
440 
/*
 * Will probe the first TSB, and if it finds a match, will insert it
 * into the TLB and retry.  A negative (invalid) TTE or a tag mismatch
 * branches to 'label'1 (the miss path).
 *
 * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
 * vpg_4m = 4M virtual page number for tag matching  (in, ro)
 * label = where to branch to if this is a miss (text)
 * %asi = atomic ASI to use for the TSB access
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = location tag will be retrieved into from TSB (out)
 * g5 = location data(tte) will be retrieved into from TSB (out)
 * (%g1-%g3 are also clobbered by DTLB_STUFF on the hit path.)
 */
#define	PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label)	/* g4/g5 clobbered */	\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */
468 
469 
/*
 * Same as above, only if the TTE doesn't have the execute
 * bit set, will branch to exec_fault directly.
 */
#define	PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, label/**/1	/* branch if TTE invalid */	;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */
490 
491 
/*
 * Get the location in the 2nd TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings, hence the
 * index is taken from the 4M page number (MMU_PAGESHIFT4M) rather
 * than the base page number.
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe, tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 2nd TSB
 */

#define	GET_2ND_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	mov	SCRATCHPAD_UTSBREG2, tmp1;				\
	ldxa	[tmp1]ASI_SCRATCHPAD, tsbe;	/* get tsbreg */	\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2;	/* tmp2=szc */		\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe;	/* tsbbase */		\
	mov	TSB_ENTRIES(0), tmp1;	/* nentries in TSB size 0 */	\
	sllx	tmp1, tmp2, tmp1;	/* tmp1 = nentries in TSB */	\
	sub	tmp1, 1, tmp1;		/* mask = nentries - 1 */	\
	srlx	tagacc, MMU_PAGESHIFT4M, tmp2; 				\
	and	tmp2, tmp1, tmp1;	/* tsbent = virtpage & mask */	\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1;	/* entry num --> ptr */	\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */
515 
516 
/*
 * Probe the second (4M) data TSB; on a hit, stuff the TTE into the
 * DTLB and retry; on a miss, fall through to 'label'1.
 *
 * vpg_4m = 4M virtual page number for tag matching (in)
 * tsbe_ptr = precomputed second TSB entry pointer (in)
 * label = label to use to make branch targets unique (text)
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = tag portion of TSBE (out)
 * g5 = data portion of TSBE (out)
 * (%g1-%g3 are also clobbered by DTLB_STUFF on the hit path.)
 */
#define	PROBE_2ND_DTSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4  /* g4 = tag, g5 = data */ ;\
	/* since we are looking at 2nd tsb, if it's valid, it must be 4M */ ;\
	cmp	%g4, vpg_4m						;\
	bne,pn	%xcc, label/**/1					;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	mov	tsbe_ptr, %g1		/* trace_tsbhit wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */
542 
543 
/*
 * Same as above, with the following additions:
 * If the TTE found is not executable, branch directly
 * to exec_fault.  If a TSB miss, branch to TSB miss handler
 * (sfmmu_tsb_miss_tt) rather than to a local label.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, sfmmu_tsb_miss_tt	/* branch if TTE invalid */	;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */
564 
565 
566 #endif /* _ASM */
567 
568 #ifdef	__cplusplus
569 }
570 #endif
571 
572 #endif	/* _VM_MACH_SFMMU_H */
573