/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the contents of the Sun Reference MMU (sfmmu)
 * specific hat data structures and the sfmmu specific hat procedures.
 * The machine independent interface is described in <vm/hat.h>.
 */

#ifndef _VM_MACH_SFMMU_H
#define	_VM_MACH_SFMMU_H

#include <sys/x_call.h>
#include <sys/hypervisor_api.h>
#include <sys/mmu.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Define UTSB_PHYS if the user TSB is always accessed via its physical
 * address.  On the sun4v platform, the user TSB is accessed via physical
 * address.
 */
#define	UTSB_PHYS	1

/*
 * Hypervisor TSB info
 */
#define	NHV_TSB_INFO	4

#ifndef _ASM

struct hv_tsb_block {
	uint64_t	hv_tsb_info_pa;	/* hypervisor TSB info PA */
	uint64_t	hv_tsb_info_cnt; /* hypervisor TSB info count */
	hv_tsb_info_t	hv_tsb_info[NHV_TSB_INFO]; /* hypervisor TSB info */
};

/*
 * Defines for hypervisor pagesize search API.
 */

#define	TLB_PGSZ_ENABLE_SHIFT	15
#define	TLB_PGSZ_CTX_SHIFT	7
#define	TLB_PGSZ_ENABLE		(1<<TLB_PGSZ_ENABLE_SHIFT)
#define	TLB_PGSZ_CONTEXT1	(1<<TLB_PGSZ_CTX_SHIFT)
#define	TLB_PGSZ_CONTEXT1_ENABLE (TLB_PGSZ_ENABLE|TLB_PGSZ_CONTEXT1)
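
/*
 * For illustration: with the shifts above, TLB_PGSZ_ENABLE is
 * (1 << 15) == 0x8000 and TLB_PGSZ_CONTEXT1 is (1 << 7) == 0x0080,
 * so TLB_PGSZ_CONTEXT1_ENABLE composes to 0x8080.
 */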

struct hv_pgsz_order {
	uint64_t hv_pgsz_order_pa;	/* hypervisor pagesize order PA */
					/* hypervisor pagesize order */
	uint16_t hv_pgsz_order[MAX_PGSZ_SEARCH_ORDER];
};

#define	sfmmu_pgsz_order_hv sfmmu_pgsz_order.hv_pgsz_order

#endif /* _ASM */

/* value for sfmmu_pgsz_map if all shared pagesizes are allowed */
#define	TLB_ALL_SHARED_PGSZ	0xff

#ifdef _ASM

/*
 * This macro is used to set the private/shared secondary context register
 * in sfmmu_alloc_ctx().
 * Input:
 * cnum     = context number
 * is_shctx = sfmmu private/shared flag (0: private, 1: shared)
 * tmp2 is only used in the sun4u version of this macro.
 */
#define	SET_SECCTX(cnum, is_shctx, tmp1, tmp2, label)			\
	mov	MMU_SCONTEXT, tmp1;					\
	movrnz	is_shctx, MMU_SCONTEXT1, tmp1;				\
	stxa    cnum, [tmp1]ASI_MMU_CTX;  /* set 2nd ctx reg. */	\
	membar  #Sync;
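
/*
 * Usage sketch (hypothetical register choices, for illustration only):
 * with the new context number in %g3 and the shared flag in %g4,
 *
 *	SET_SECCTX(%g3, %g4, %g1, %g2, lbl)
 *
 * writes %g3 to MMU_SCONTEXT (private) or MMU_SCONTEXT1 (shared); the
 * tmp2 and label arguments are unused in this sun4v version.
 */
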
/*
 * This macro is used in the MMU code to check if TL should be lowered from
 * 2 to 1 to pop trapstat's state.  See the block comment in trapstat.c
 * for details.
 */

#define	TSTAT_CHECK_TL1(label, scr1, scr2)			\
	rdpr	%tpc, scr1;					\
	sethi	%hi(KERNELBASE), scr2;				\
	or	scr2, %lo(KERNELBASE), scr2; 			\
	cmp	scr1, scr2; 					\
	bgeu	%xcc, 9f;					\
	nop;							\
	wrpr	%g0, 1, %gl;					\
	ba	label;						\
	wrpr	%g0, 1, %tl;					\
9:

/*
 * The following macros allow us to share the majority of the
 * SFMMU code between the sun4u and sun4v platforms.
 */

#define	SETUP_TSB_ASI(qlp, tmp)

#define	SETUP_UTSB_ATOMIC_ASI(tmp1, tmp2)

/*
 * Macro to switch to the alternate global registers on sun4u platforms
 * (not applicable to sun4v platforms)
 */
#define	USE_ALTERNATE_GLOBALS(scr)

/*
 * Macro to set the %gl register value on sun4v platforms
 * (not applicable to sun4u platforms)
 */
#define	SET_GL_REG(val)						\
	wrpr	%g0, val, %gl

/*
 * Get the pseudo-tagacc value and context type from the MMU fault area.
 * The pseudo-tagacc is the faulting virtual address OR'd with 0 for
 * KCONTEXT, INVALID_CONTEXT (1) for an invalid context, and USER_CONTEXT
 * (2) for a user context.
 *
 * In:
 *   ptagacc, ctxtype = scratch registers
 * Out:
 *   ptagacc = MMU data tag access register value
 *   ctxtype = context type (KCONTEXT, INVALID_CONTEXT or USER_CONTEXT)
 */
#define	GET_MMU_D_PTAGACC_CTXTYPE(ptagacc, ctxtype)			\
	MMU_FAULT_STATUS_AREA(ctxtype);					\
	ldx	[ctxtype + MMFSA_D_ADDR], ptagacc;			\
	ldx	[ctxtype + MMFSA_D_CTX], ctxtype;			\
	srlx	ptagacc, MMU_PAGESHIFT, ptagacc; /* align to page boundary */ \
	cmp	ctxtype, USER_CONTEXT_TYPE;				\
	sllx	ptagacc, MMU_PAGESHIFT, ptagacc;			\
	movgu	%icc, USER_CONTEXT_TYPE, ctxtype;			\
	or	ptagacc, ctxtype, ptagacc
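
/*
 * Worked example (assuming the 8K base page, MMU_PAGESHIFT == 13): a
 * user-context data fault at VA 0x12345678 yields
 * ptagacc = (0x12345678 & ~0x1fff) | USER_CONTEXT_TYPE == 0x12344002.
 */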

/*
 * Synthesize/get data tag access register value from the MMU fault area
 *
 * In:
 *   tagacc, scr1 = scratch registers
 * Out:
 *   tagacc = MMU data tag access register value
 */
#define	GET_MMU_D_TAGACC(tagacc, scr1)				\
	GET_MMU_D_PTAGACC_CTXTYPE(tagacc, scr1)

/*
 * Synthesize/get data tag target register value from the MMU fault area
 *
 * In:
 *   ttarget, scr1 = scratch registers
 * Out:
 *   ttarget = MMU data tag target register value
 */
#define	GET_MMU_D_TTARGET(ttarget, scr1)			\
	MMU_FAULT_STATUS_AREA(ttarget);				\
	ldx	[ttarget + MMFSA_D_CTX], scr1;			\
	sllx	scr1, TTARGET_CTX_SHIFT, scr1;			\
	ldx	[ttarget + MMFSA_D_ADDR], ttarget;		\
	srlx	ttarget, TTARGET_VA_SHIFT, ttarget;		\
	or	ttarget, scr1, ttarget
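
/*
 * In C terms the tag target composes as (illustration only):
 *
 *	ttarget = (ctx << TTARGET_CTX_SHIFT) | (vaddr >> TTARGET_VA_SHIFT);
 */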

/*
 * Synthesize/get data/instruction pseudo tag access register values
 * from the MMU fault area (context is 0 for kernel, 1 for invalid, 2 for user)
 *
 * In:
 *   dtagacc, itagacc, scr1, scr2 = scratch registers
 * Out:
 *   dtagacc = MMU data tag access register value w/pseudo-context
 *   itagacc = MMU instruction tag access register value w/pseudo-context
 */
#define	GET_MMU_BOTH_TAGACC(dtagacc, itagacc, scr1, scr2)	\
	MMU_FAULT_STATUS_AREA(scr1);				\
	ldx	[scr1 + MMFSA_D_ADDR], scr2;			\
	ldx	[scr1 + MMFSA_D_CTX], dtagacc;			\
	srlx	scr2, MMU_PAGESHIFT, scr2;	/* align to page boundary */ \
	cmp	dtagacc, USER_CONTEXT_TYPE;			\
	sllx	scr2, MMU_PAGESHIFT, scr2;			\
	movgu	%icc, USER_CONTEXT_TYPE, dtagacc;		\
	or	scr2, dtagacc, dtagacc;				\
	ldx	[scr1 + MMFSA_I_ADDR], scr2;			\
	ldx	[scr1 + MMFSA_I_CTX], itagacc;			\
	srlx	scr2, MMU_PAGESHIFT, scr2;	/* align to page boundary */ \
	cmp	itagacc, USER_CONTEXT_TYPE;			\
	sllx	scr2, MMU_PAGESHIFT, scr2;			\
	movgu	%icc, USER_CONTEXT_TYPE, itagacc;		\
	or	scr2, itagacc, itagacc

/*
 * Synthesize/get MMU data fault address from the MMU fault area
 *
 * In:
 *   daddr, scr1 = scratch registers
 * Out:
 *   daddr = MMU data fault address
 */
#define	GET_MMU_D_ADDR(daddr, scr1)				\
	MMU_FAULT_STATUS_AREA(scr1);				\
	ldx	[scr1 + MMFSA_D_ADDR], daddr

/*
 * Get the pseudo-tagacc value and context type from the MMU fault area.
 * The pseudo-tagacc is the faulting virtual address OR'd with 0 for
 * KCONTEXT, INVALID_CONTEXT (1) for an invalid context, and USER_CONTEXT
 * (2) for a user context.  This is the instruction (IMMU) counterpart of
 * GET_MMU_D_PTAGACC_CTXTYPE.
 *
 * In:
 *   ptagacc, ctxtype = scratch registers
 * Out:
 *   ptagacc = MMU instruction tag access register value
 *   ctxtype = context type (KCONTEXT, INVALID_CONTEXT or USER_CONTEXT)
 */
#define	GET_MMU_I_PTAGACC_CTXTYPE(ptagacc, ctxtype)			\
	MMU_FAULT_STATUS_AREA(ctxtype);					\
	ldx	[ctxtype + MMFSA_I_ADDR], ptagacc;			\
	ldx	[ctxtype + MMFSA_I_CTX], ctxtype;			\
	srlx	ptagacc, MMU_PAGESHIFT, ptagacc; /* align to page boundary */ \
	cmp	ctxtype, USER_CONTEXT_TYPE;				\
	sllx	ptagacc, MMU_PAGESHIFT, ptagacc;			\
	movgu	%icc, USER_CONTEXT_TYPE, ctxtype;			\
	or	ptagacc, ctxtype, ptagacc

/*
 * Load ITLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers
 */
#define	ITLB_STUFF(tte, scr1, scr2, scr3, scr4)		\
	mov	%o0, scr1;				\
	mov	%o1, scr2;				\
	mov	%o2, scr3;				\
	mov	%o3, scr4;				\
	MMU_FAULT_STATUS_AREA(%o2);			\
	ldx	[%o2 + MMFSA_I_ADDR], %o0;		\
	ldx	[%o2 + MMFSA_I_CTX], %o1;		\
	mov	tte, %o2;				\
	mov	MAP_ITLB, %o3;				\
	ta	MMU_MAP_ADDR;				\
	/* BEGIN CSTYLED */				\
	brnz,a,pn %o0, ptl1_panic;			\
	  mov	PTL1_BAD_HCALL, %g1;			\
	/* END CSTYLED */				\
	mov	scr1, %o0;				\
	mov	scr2, %o1;				\
	mov	scr3, %o2;				\
	mov	scr4, %o3
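
/*
 * The MMU_MAP_ADDR fast trap above follows the calling convention
 * visible in the macro: %o0 = vaddr, %o1 = context, %o2 = tte,
 * %o3 = MAP_ITLB/MAP_DTLB flags; a nonzero status returned in %o0 is
 * fatal and routes to ptl1_panic with PTL1_BAD_HCALL.
 */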

/*
 * Load DTLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers
 */
#define	DTLB_STUFF(tte, scr1, scr2, scr3, scr4)		\
	mov	%o0, scr1;				\
	mov	%o1, scr2;				\
	mov	%o2, scr3;				\
	mov	%o3, scr4;				\
	MMU_FAULT_STATUS_AREA(%o2);			\
	ldx	[%o2 + MMFSA_D_ADDR], %o0;		\
	ldx	[%o2 + MMFSA_D_CTX], %o1;		\
	mov	tte, %o2;				\
	mov	MAP_DTLB, %o3;				\
	ta	MMU_MAP_ADDR;				\
	/* BEGIN CSTYLED */				\
	brnz,a,pn %o0, ptl1_panic;			\
	  mov	PTL1_BAD_HCALL, %g1;			\
	/* END CSTYLED */				\
	mov	scr1, %o0;				\
	mov	scr2, %o1;				\
	mov	scr3, %o2;				\
	mov	scr4, %o3

/*
 * Returns PFN given the TTE and vaddr
 *
 * In:
 *   tte = reg containing tte
 *   vaddr = reg containing vaddr
 *   scr1, scr2, scr3 = scratch registers
 * Out:
 *   tte = PFN value
 */
#define	TTETOPFN(tte, vaddr, label, scr1, scr2, scr3)			\
	and	tte, TTE_SZ_BITS, scr1;		/* scr1 = ttesz */	\
	sllx	tte, TTE_PA_LSHIFT, tte;				\
	sllx	scr1, 1, scr2;						\
	add	scr2, scr1, scr2;		/* mulx 3 */		\
	add	scr2, MMU_PAGESHIFT + TTE_PA_LSHIFT, scr3;		\
	/* CSTYLED */							\
	brz,pt	scr2, label/**/1;					\
	srlx	tte, scr3, tte;						\
	sllx	tte, scr2, tte;						\
	set	1, scr1;						\
	add	scr2, MMU_PAGESHIFT, scr3;				\
	sllx	scr1, scr3, scr1;					\
	sub	scr1, 1, scr1;	/* scr1=TTE_PAGE_OFFSET(ttesz) */	\
	and	vaddr, scr1, scr2;					\
	srln	scr2, MMU_PAGESHIFT, scr2;				\
	or	tte, scr2, tte;						\
	/* CSTYLED */							\
label/**/1:
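
/*
 * Worked example (assuming an 8K base page, MMU_PAGESHIFT == 13, and the
 * usual 4M size code of 3): the shift count is 3 * 3 == 9, so the PA
 * field is reduced to the PFN of the 4M page and the low 9 PFN bits are
 * refilled from the vaddr.  E.g. a 4M page at PA 0x40000000 (PFN 0x20000)
 * with a vaddr offset of 0x122000 within the page yields
 * PFN 0x20000 + (0x122000 >> 13) == 0x20091.
 */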

/*
 * Support for the non-coherent I$.
 *
 * On sun4v we use tte bit 3 as a software flag indicating whether
 * execute permission is given.  IMMU miss traps cause the real execute
 * permission to be set.  sfmmu_ttesync() will see if execute permission
 * has been set, and then set P_EXEC in page_t.  This causes an I-cache
 * flush when the page is freed.
 *
 * However, the hypervisor reserves bit 3 as part of a 4-bit page size.
 * We allow this flag to be set in the hme TTE, but never in the TSB or TLB.
 */
#define	TTE_CLR_SOFTEXEC_ML(tte)	bclr TTE_SOFTEXEC_INT, tte
#define	TTE_CHK_SOFTEXEC_ML(tte)	andcc tte, TTE_SOFTEXEC_INT, %g0

/*
 * TTE_SET_EXEC_ML is a macro that updates the exec bit if it is
 * not already set; it also sets the reference bit at the same time.
 *
 * Caller must check EXECPRM.  Do not call if it is already set in the tte.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tmp1     = tmp reg
 * label    = temporary label
 */

#define	TTE_SET_EXEC_ML(tte, ttepa, tmp1, label)			\
	/* BEGIN CSTYLED */						\
	/* update execprm bit */					\
label/**/1:								\
	or	tte, (TTE_EXECPRM_INT | TTE_REF_INT), tmp1;		\
	casxa	[ttepa]ASI_MEM, tte, tmp1; 	/* update bits */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	  mov	tmp1, tte;						\
	or	tte, (TTE_EXECPRM_INT | TTE_REF_INT), tte;		\
	/* END CSTYLED */
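
/*
 * The casxa loop above is the usual compare-and-swap retry idiom; a
 * rough C sketch, with a hypothetical cas64() standing in for
 * casxa [ttepa]ASI_MEM (illustration only, not a kernel API):
 *
 *	for (;;) {
 *		uint64_t new = tte | (TTE_EXECPRM_INT | TTE_REF_INT);
 *		uint64_t old = cas64(ttepa, tte, new);
 *		if (old == tte)
 *			break;		// swap succeeded
 *		tte = old;		// lost a race; retry with new value
 *	}
 *	tte |= (TTE_EXECPRM_INT | TTE_REF_INT);
 */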

/*
 * TTE_SET_REF_ML is a macro that updates the reference bit if it is
 * not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * tmp2     = tmp reg
 * label    = temporary label
 */

#define	TTE_SET_REF_ML(tte, ttepa, tsbarea, tmp1, tmp2, label)		\
	/* BEGIN CSTYLED */						\
	/* check reference bit */					\
	btst	TTE_REF_INT, tte;					\
	bnz,pt	%xcc, label/**/2;	/* if ref bit set-skip ahead */	\
	nop;								\
	/* update reference bit */					\
label/**/1:								\
	or	tte, TTE_REF_INT, tmp1;					\
	casxa	[ttepa]ASI_MEM, tte, tmp1; 	/* update ref bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_REF_INT, tte;					\
label/**/2:								\
	/* END CSTYLED */


/*
 * TTE_SET_REFMOD_ML is a macro that updates the reference and modify bits
 * if they are not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * tmp2     = tmp reg
 * label    = temporary label
 * exitlabel = label to jump to if the write perm bit is not set.
 */

#define	TTE_SET_REFMOD_ML(tte, ttepa, tsbarea, tmp1, tmp2, label,	\
	exitlabel)							\
	/* BEGIN CSTYLED */						\
	/* check write permission and hw modify bits */			\
	btst	TTE_WRPRM_INT, tte;					\
	bz,pn	%xcc, exitlabel;	/* exit if wr_perm not set */	\
	  btst	TTE_HWWR_INT, tte;					\
	bnz,pn	%xcc, label/**/2;	/* nothing to do */		\
	  nop;								\
	/* update reference and modify bits */				\
label/**/1:								\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tmp1;			\
	casxa	[ttepa]ASI_MEM, tte, tmp1; /* update ref/mod bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tte;			\
label/**/2:								\
	/* END CSTYLED */
/*
 * Get the TSB base register for shared contexts.  Despite the name, the
 * shared-context TSB pointer is read from the tsbmiss area.
 *
 * In:
 *   tsbmiss = pointer to tsbmiss area
 *   tsbmissoffset = offset to the right tsb pointer
 *   tsbreg = scratch
 * Out:
 *   tsbreg = TSB base register value from the tsbmiss area
 */
#define	GET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg)		\
	ldx	[tsbmiss + tsbmissoffset], tsbreg


/*
 * Get the location of the TSB entry in the first TSB to probe
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe, tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 1st TSB
 */

#define	GET_1ST_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	/* BEGIN CSTYLED */						\
	mov	SCRATCHPAD_UTSBREG1, tmp1				;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tsbe	/* get tsbreg */	;\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2	/* tmp2=szc */		;\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe	/* tsbbase */		;\
	mov	TSB_ENTRIES(0), tmp1	/* nentries in TSB size 0 */	;\
	sllx	tmp1, tmp2, tmp1	/* tmp1 = nentries in TSB */	;\
	sub	tmp1, 1, tmp1		/* mask = nentries - 1 */	;\
	srlx	tagacc, MMU_PAGESHIFT, tmp2				;\
	and	tmp2, tmp1, tmp1	/* tsbent = virtpage & mask */	;\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1	/* entry num --> ptr */	;\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */ ;\
	/* END CSTYLED */
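
/*
 * In C terms the entry pointer computes as (illustration only; szc is
 * the TSB size code held in the low bits of the scratchpad value):
 *
 *	nentries = TSB_ENTRIES(0) << szc;
 *	tsbe = tsbbase +
 *	    (((tagacc >> MMU_PAGESHIFT) & (nentries - 1)) << TSB_ENTRY_SHIFT);
 */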

/*
 * Probe the first TSB; on a match, insert the TTE into the TLB
 * and retry the faulting instruction.
 *
 * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
 * vpg_4m = 4M virtual page number for tag matching  (in, ro)
 * label = where to branch to if this is a miss (text)
 * %asi = atomic ASI to use for the TSB access
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = location tag will be retrieved into from TSB (out)
 * g5 = location data(tte) will be retrieved into from TSB (out)
 */
#define	PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label)	/* g4/g5 clobbered */	\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */
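
/*
 * Note that the TTE valid bit is the sign bit of the TSBE data word, so
 * the brgez above treats any non-negative %g5 as an invalid entry and
 * falls through to the miss path.
 */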

/*
 * Same as above, except that if the TTE doesn't have the execute
 * bit set, it branches directly to exec_fault.
 */
#define	PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */

/*
 * vpg_4m = 4M virtual page number for tag matching (in)
 * tsbe_ptr = precomputed second TSB entry pointer (in)
 * label = label to use to make branch targets unique (text)
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = tag portion of TSBE (out)
 * g5 = data portion of TSBE (out)
 */
#define	PROBE_2ND_DTSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4  /* g4 = tag, g5 = data */ ;\
	/* since we are looking at 2nd tsb, if it's valid, it must be 4M */ ;\
	cmp	%g4, vpg_4m						;\
	bne,pn	%xcc, label/**/1					;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	mov	tsbe_ptr, %g1		/* trace_tsbhit wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */


/*
 * Same as above, with the following additions:
 * If the TTE found is not executable, branch directly
 * to exec_fault.  On a TSB miss, branch to the TSB miss handler.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, sfmmu_tsb_miss_tt					;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */

/*
 * 1. Get ctx1.  The trap type is supplied by the caller.
 * 2. If it is an iTSB miss, store ctx1 in MMFSA_I_CTX.
 * 3. If it is a dTSB miss, store ctx1 in MMFSA_D_CTX.
 * 4. Thus the [D|I]TLB_STUFF will work as expected.
 */
#define	SAVE_CTX1(traptype, ctx1, tmp, label)				\
	/* BEGIN CSTYLED */						\
	mov	MMU_SCONTEXT1, tmp					;\
	ldxa	[tmp]ASI_MMU_CTX, ctx1					;\
	MMU_FAULT_STATUS_AREA(tmp)					;\
	cmp     traptype, FAST_IMMU_MISS_TT				;\
	be,a,pn %icc, label						;\
	  stx	ctx1, [tmp + MMFSA_I_CTX] 				;\
	cmp     traptype, T_INSTR_MMU_MISS				;\
	be,a,pn %icc, label						;\
	  stx	ctx1, [tmp + MMFSA_I_CTX]				;\
	stx	ctx1, [tmp + MMFSA_D_CTX]				;\
label:
	/* END CSTYLED */

/*
 * For shared context mappings, check against the page size bitmap in the
 * tsbmiss area to decide if we should use private mappings instead, to
 * reduce the number of shared page size searches on Rock based platforms.
 * In:
 *   tsbarea (not clobbered)
 *   tte (not clobbered)
 *   tmp (clobbered)
 * Out:
 *   use_shctx - changed to 0 if the page size bit is not set in the mask.
 */
#define	CHECK_SHARED_PGSZ(tsbarea, tte, tmp, use_shctx, label)  \
	/* BEGIN CSTYLED */					     \
	brz     use_shctx, label/**/1				    ;\
	 and    tte, TTE_SZ_BITS, tmp			    	    ;\
	ldub    [tsbarea + TSBMISS_PGSZ_BITMAP], use_shctx	    ;\
	srlx    use_shctx, tmp, use_shctx			    ;\
	and     use_shctx, 0x1, use_shctx			    ;\
label/**/1:
	/* END CSTYLED */
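
/*
 * Worked example: with a bitmap of 0x09 (bits 0 and 3 set, i.e. only the
 * smallest and fourth page sizes shared), a tte whose size code is 1
 * yields use_shctx = (0x09 >> 1) & 1 == 0, forcing a private mapping,
 * while TLB_ALL_SHARED_PGSZ (0xff) keeps use_shctx set for any size code.
 */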

#endif /* _ASM */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_MACH_SFMMU_H */