/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the contents of the sun reference mmu (sfmmu)
 * specific hat data structures and the sfmmu specific hat procedures.
 * The machine independent interface is described in <vm/hat.h>.
 */

#ifndef _VM_MACH_SFMMU_H
#define	_VM_MACH_SFMMU_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/x_call.h>
#include <sys/hypervisor_api.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Define UTSB_PHYS if the user TSB is always accessed via physical address.
 * On the sun4v platform, the user TSB is always accessed via its
 * physical address.
 */
#define	UTSB_PHYS	1

/*
 * Hypervisor TSB info
 */
#define	NHV_TSB_INFO	4

#ifndef _ASM

struct hv_tsb_block {
	uint64_t	hv_tsb_info_pa;	/* hypervisor TSB info PA */
	uint64_t	hv_tsb_info_cnt; /* hypervisor TSB info count */
	hv_tsb_info_t	hv_tsb_info[NHV_TSB_INFO]; /* hypervisor TSB info */
};
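
/*
 * Rough sketch of how a hv_tsb_block is wired up before being handed to
 * the hypervisor (illustrative only; the real setup lives in the sun4v
 * HAT code, and va_to_pa()/hv_set_ctxnon0() are assumed to be the usual
 * sun4v helpers):
 *
 *	hvbp->hv_tsb_info_cnt = ntsbs;		(ntsbs <= NHV_TSB_INFO)
 *	hvbp->hv_tsb_info_pa = va_to_pa(&hvbp->hv_tsb_info[0]);
 *	(void) hv_set_ctxnon0(hvbp->hv_tsb_info_cnt, hvbp->hv_tsb_info_pa);
 */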

#endif /* _ASM */

#ifdef _ASM

/*
 * This macro is used to set the private/shared secondary context register
 * in sfmmu_alloc_ctx().
 * Input:
 * cnum     = context number
 * is_shctx = sfmmu private/shared flag (0: private, 1: shared)
 */
#define	SET_SECCTX(cnum, is_shctx, tmp1, tmp2)				\
	mov	MMU_SCONTEXT, tmp1;					\
	movrnz	is_shctx, MMU_SCONTEXT1, tmp1;				\
	sethi   %hi(FLUSH_ADDR), tmp2;					\
	stxa    cnum, [tmp1]ASI_MMU_CTX;  /* set 2nd ctx reg. */	\
	flush   tmp2

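/*
 * In C-like pseudocode, SET_SECCTX amounts to the following (sketch
 * only; asi_write() is shorthand for the stxa, not a real function):
 *
 *	reg = is_shctx ? MMU_SCONTEXT1 : MMU_SCONTEXT;
 *	asi_write(ASI_MMU_CTX, reg, cnum);
 *	flush(FLUSH_ADDR);
 */
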
/*
 * This macro is used in the MMU code to check if TL should be lowered from
 * 2 to 1 to pop trapstat's state.  See the block comment in trapstat.c
 * for details.
 */

#define	TSTAT_CHECK_TL1(label, scr1, scr2)			\
	rdpr	%tpc, scr1;					\
	sethi	%hi(KERNELBASE), scr2;				\
	or	scr2, %lo(KERNELBASE), scr2; 			\
	cmp	scr1, scr2; 					\
	bgeu	%xcc, 9f;					\
	nop;							\
	wrpr	%g0, 1, %gl;					\
	ba	label;						\
	wrpr	%g0, 1, %tl;					\
9:

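/*
 * Sketch of the check above: if %tpc is below KERNELBASE, drop back to
 * TL=1 (and GL=1) and branch to the caller's label; otherwise fall
 * through at 9:.
 *
 *	if (tpc < KERNELBASE) {
 *		gl = 1;
 *		tl = 1;
 *		goto label;
 *	}
 */
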
/*
 * The following macros allow us to share the majority of the
 * SFMMU code between sun4u and sun4v platforms.  They are no-ops
 * on sun4v.
 */

#define	SETUP_TSB_ASI(qlp, tmp)

#define	SETUP_UTSB_ATOMIC_ASI(tmp1, tmp2)

/*
 * Macro to switch to alternate global register on sun4u platforms
 * (not applicable to sun4v platforms)
 */
#define	USE_ALTERNATE_GLOBALS(scr)

/*
 * Macro to set %gl register value on sun4v platforms
 * (not applicable to sun4u platforms)
 */
#define	SET_GL_REG(val)						\
	wrpr	%g0, val, %gl

/*
 * Get pseudo-tagacc value and context from the MMU fault area.  Pseudo-tagacc
 * is the faulting virtual address OR'd with 0 for KCONTEXT, INVALID_CONTEXT
 * (1) for invalid context, and USER_CONTEXT (2) for user context.
 *
 * In:
 *   ptagacc, ctxtype = scratch registers
 * Out:
 *   ptagacc = MMU data tag access register value
 *   ctxtype = context type (KCONTEXT, INVALID_CONTEXT or USER_CONTEXT)
 */
#define	GET_MMU_D_PTAGACC_CTXTYPE(ptagacc, ctxtype)			\
	MMU_FAULT_STATUS_AREA(ctxtype);					\
	ldx	[ctxtype + MMFSA_D_ADDR], ptagacc;			\
	ldx	[ctxtype + MMFSA_D_CTX], ctxtype;			\
	srlx	ptagacc, MMU_PAGESHIFT, ptagacc; /* align to page boundary */ \
	cmp	ctxtype, USER_CONTEXT_TYPE;				\
	sllx	ptagacc, MMU_PAGESHIFT, ptagacc;			\
	movgu	%icc, USER_CONTEXT_TYPE, ctxtype;			\
	or	ptagacc, ctxtype, ptagacc
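
/*
 * Illustrative C equivalent of the macro above (mmfsa stands for the
 * per-CPU MMU fault status area):
 *
 *	ctxtype = MIN(mmfsa->d_ctx, USER_CONTEXT_TYPE);
 *	ptagacc = (mmfsa->d_addr & ~MMU_PAGEOFFSET) | ctxtype;
 */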

/*
 * Synthesize/get data tag access register value from the MMU fault area
 *
 * In:
 *   tagacc, scr1 = scratch registers
 * Out:
 *   tagacc = MMU data tag access register value
 */
#define	GET_MMU_D_TAGACC(tagacc, scr1)				\
	GET_MMU_D_PTAGACC_CTXTYPE(tagacc, scr1)

/*
 * Synthesize/get data tag target register value from the MMU fault area
 *
 * In:
 *   ttarget, scr1 = scratch registers
 * Out:
 *   ttarget = MMU data tag target register value
 */
#define	GET_MMU_D_TTARGET(ttarget, scr1)			\
	MMU_FAULT_STATUS_AREA(ttarget);				\
	ldx	[ttarget + MMFSA_D_CTX], scr1;			\
	sllx	scr1, TTARGET_CTX_SHIFT, scr1;			\
	ldx	[ttarget + MMFSA_D_ADDR], ttarget;		\
	srlx	ttarget, TTARGET_VA_SHIFT, ttarget;		\
	or	ttarget, scr1, ttarget
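
/*
 * Illustrative C equivalent of GET_MMU_D_TTARGET:
 *
 *	ttarget = (mmfsa->d_ctx << TTARGET_CTX_SHIFT) |
 *	    (mmfsa->d_addr >> TTARGET_VA_SHIFT);
 */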

/*
 * Synthesize/get data/instruction pseudo tag access register values
 * from the MMU fault area (context is 0 for kernel, 1 for invalid, 2 for user)
 *
 * In:
 *   dtagacc, itagacc, scr1, scr2 = scratch registers
 * Out:
 *   dtagacc = MMU data tag access register value w/pseudo-context
 *   itagacc = MMU instruction tag access register value w/pseudo-context
 */
#define	GET_MMU_BOTH_TAGACC(dtagacc, itagacc, scr1, scr2)	\
	MMU_FAULT_STATUS_AREA(scr1);				\
	ldx	[scr1 + MMFSA_D_ADDR], scr2;			\
	ldx	[scr1 + MMFSA_D_CTX], dtagacc;			\
	srlx	scr2, MMU_PAGESHIFT, scr2;	/* align to page boundary */ \
	cmp	dtagacc, USER_CONTEXT_TYPE;			\
	sllx	scr2, MMU_PAGESHIFT, scr2;			\
	movgu	%icc, USER_CONTEXT_TYPE, dtagacc;		\
	or	scr2, dtagacc, dtagacc;				\
	ldx	[scr1 + MMFSA_I_ADDR], scr2;			\
	ldx	[scr1 + MMFSA_I_CTX], itagacc;			\
	srlx	scr2, MMU_PAGESHIFT, scr2;	/* align to page boundary */ \
	cmp	itagacc, USER_CONTEXT_TYPE;			\
	sllx	scr2, MMU_PAGESHIFT, scr2;			\
	movgu	%icc, USER_CONTEXT_TYPE, itagacc;		\
	or	scr2, itagacc, itagacc

/*
 * Synthesize/get MMU data fault address from the MMU fault area
 *
 * In:
 *   daddr, scr1 = scratch registers
 * Out:
 *   daddr = MMU data fault address
 */
#define	GET_MMU_D_ADDR(daddr, scr1)				\
	MMU_FAULT_STATUS_AREA(scr1);				\
	ldx	[scr1 + MMFSA_D_ADDR], daddr

/*
 * Get pseudo-tagacc value and context from the MMU fault area.  Pseudo-tagacc
 * is the faulting virtual address OR'd with 0 for KCONTEXT, INVALID_CONTEXT
 * (1) for invalid context, and USER_CONTEXT (2) for user context.
 *
 * In:
 *   ptagacc, ctxtype = scratch registers
 * Out:
 *   ptagacc = MMU instruction tag access register value
 *   ctxtype = context type (KCONTEXT, INVALID_CONTEXT or USER_CONTEXT)
 */
#define	GET_MMU_I_PTAGACC_CTXTYPE(ptagacc, ctxtype)			\
	MMU_FAULT_STATUS_AREA(ctxtype);					\
	ldx	[ctxtype + MMFSA_I_ADDR], ptagacc;			\
	ldx	[ctxtype + MMFSA_I_CTX], ctxtype;			\
	srlx	ptagacc, MMU_PAGESHIFT, ptagacc; /* align to page boundary */ \
	cmp	ctxtype, USER_CONTEXT_TYPE;				\
	sllx	ptagacc, MMU_PAGESHIFT, ptagacc;			\
	movgu	%icc, USER_CONTEXT_TYPE, ctxtype;			\
	or	ptagacc, ctxtype, ptagacc

/*
 * Load ITLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers
 */
#define	ITLB_STUFF(tte, scr1, scr2, scr3, scr4)		\
	mov	%o0, scr1;				\
	mov	%o1, scr2;				\
	mov	%o2, scr3;				\
	mov	%o3, scr4;				\
	MMU_FAULT_STATUS_AREA(%o2);			\
	ldx	[%o2 + MMFSA_I_ADDR], %o0;		\
	ldx	[%o2 + MMFSA_I_CTX], %o1;		\
	mov	tte, %o2;				\
	mov	MAP_ITLB, %o3;				\
	ta	MMU_MAP_ADDR;				\
	/* BEGIN CSTYLED */				\
	brnz,a,pn %o0, ptl1_panic;			\
	  mov	PTL1_BAD_HCALL, %g1;			\
	/* END CSTYLED */				\
	mov	scr1, %o0;				\
	mov	scr2, %o1;				\
	mov	scr3, %o2;				\
	mov	scr4, %o3

/*
 * Load DTLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers
 */
#define	DTLB_STUFF(tte, scr1, scr2, scr3, scr4)		\
	mov	%o0, scr1;				\
	mov	%o1, scr2;				\
	mov	%o2, scr3;				\
	mov	%o3, scr4;				\
	MMU_FAULT_STATUS_AREA(%o2);			\
	ldx	[%o2 + MMFSA_D_ADDR], %o0;		\
	ldx	[%o2 + MMFSA_D_CTX], %o1;		\
	mov	tte, %o2;				\
	mov	MAP_DTLB, %o3;				\
	ta	MMU_MAP_ADDR;				\
	/* BEGIN CSTYLED */				\
	brnz,a,pn %o0, ptl1_panic;			\
	  mov	PTL1_BAD_HCALL, %g1;			\
	/* END CSTYLED */				\
	mov	scr1, %o0;				\
	mov	scr2, %o1;				\
	mov	scr3, %o2;				\
	mov	scr4, %o3
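
/*
 * Both TLB_STUFF macros boil down to the MMU_MAP_ADDR fast trap with
 * %o0-%o3 preserved around the call; sketched in C (hv_mmu_map_addr()
 * is a hypothetical C spelling of the "ta MMU_MAP_ADDR" hcall):
 *
 *	error = hv_mmu_map_addr(mmfsa->d_addr, mmfsa->d_ctx, tte, MAP_DTLB);
 *	if (error)
 *		ptl1_panic(PTL1_BAD_HCALL);
 */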

/*
 * Returns PFN given the TTE and vaddr
 *
 * In:
 *   tte = reg containing tte
 *   vaddr = reg containing vaddr
 *   label = temporary label for branching within the macro
 *   scr1, scr2, scr3 = scratch registers
 * Out:
 *   tte = PFN value
 */
#define	TTETOPFN(tte, vaddr, label, scr1, scr2, scr3)			\
	and	tte, TTE_SZ_BITS, scr1;		/* scr1 = ttesz */	\
	sllx	tte, TTE_PA_LSHIFT, tte;				\
	sllx	scr1, 1, scr2;						\
	add	scr2, scr1, scr2;		/* mulx 3 */		\
	add	scr2, MMU_PAGESHIFT + TTE_PA_LSHIFT, scr3;		\
	/* CSTYLED */							\
	brz,pt	scr2, label/**/1;					\
	srlx	tte, scr3, tte;						\
	sllx	tte, scr2, tte;						\
	set	1, scr1;						\
	add	scr2, MMU_PAGESHIFT, scr3;				\
	sllx	scr1, scr3, scr1;					\
	sub	scr1, 1, scr1;	/* scr1=TTE_PAGE_OFFSET(ttesz) */	\
	and	vaddr, scr1, scr2;					\
	srln	scr2, MMU_PAGESHIFT, scr2;				\
	or	tte, scr2, tte;						\
	/* CSTYLED */							\
label/**/1:
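
/*
 * TTETOPFN in C terms (sketch): each TTE size step is a factor of 8 in
 * page size, hence the multiply by 3 in the shift counts above.  The
 * result is the large-page-aligned PFN from the TTE's PA field, OR'd
 * with the 8K page index of vaddr within that page (base_pfn() below is
 * shorthand for the shift sequence, not a real helper):
 *
 *	ttesz = tte & TTE_SZ_BITS;
 *	pfn = base_pfn(tte, ttesz) |
 *	    ((vaddr & TTE_PAGE_OFFSET(ttesz)) >> MMU_PAGESHIFT);
 */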

/*
 * TTE_SET_REF_ML is a macro that updates the reference bit if it is
 * not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 */

#define	TTE_SET_REF_ML(tte, ttepa, tteva, tsbarea, tmp1, label)		\
	/* BEGIN CSTYLED */						\
	/* check reference bit */					\
	btst	TTE_REF_INT, tte;					\
	bnz,pt	%xcc, label/**/2;	/* if ref bit set, skip ahead */ \
	nop;								\
	/* update reference bit */					\
label/**/1:								\
	or	tte, TTE_REF_INT, tmp1;					\
	casxa	[ttepa]ASI_MEM, tte, tmp1; 	/* update ref bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_REF_INT, tte;					\
label/**/2:								\
	/* END CSTYLED */
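
/*
 * The loop above is a standard compare-and-swap update; in C it would
 * read roughly as follows (casx() standing in for casxa ...ASI_MEM):
 *
 *	while (!(tte & TTE_REF_INT)) {
 *		old = tte;
 *		if (casx(ttepa, old, old | TTE_REF_INT) == old)
 *			break;
 *		tte = *ttepa;		(reread through PA and retry)
 *	}
 *	tte |= TTE_REF_INT;
 */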


/*
 * TTE_SET_REFMOD_ML is a macro that updates the reference and modify bits
 * if not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 * exitlabel = label where to jump to if write perm bit not set.
 */

#define	TTE_SET_REFMOD_ML(tte, ttepa, tteva, tsbarea, tmp1, label,	\
	exitlabel)							\
	/* BEGIN CSTYLED */						\
	/* check write permission bit */				\
	btst	TTE_WRPRM_INT, tte;					\
	bz,pn	%xcc, exitlabel;	/* exit if wr_perm not set */	\
	  btst	TTE_HWWR_INT, tte;					\
	bnz,pn	%xcc, label/**/2;	/* nothing to do */		\
	  nop;								\
	/* update reference and modify bits */				\
label/**/1:								\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tmp1;			\
	casxa	[ttepa]ASI_MEM, tte, tmp1; /* update ref/mod bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/1;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tte;			\
label/**/2:								\
	/* END CSTYLED */


/*
 * Synthesize TSB base register contents for a process.
 *
 * In:
 *   tsbinfo = TSB info pointer (ro)
 *   tsbreg, tmp1 = scratch registers
 * Out:
 *   tsbreg = value to program into TSB base register
 */

#define	MAKE_UTSBREG(tsbinfo, tsbreg, tmp1)			\
	ldx	[tsbinfo + TSBINFO_PADDR], tsbreg;		\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1;		\
	and	tmp1, TSB_SOFTSZ_MASK, tmp1;			\
	or	tsbreg, tmp1, tsbreg
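
/*
 * I.e., in C (sketch; assumes the usual tsb_pa/tsb_szc names of the
 * tsb_info fields behind the TSBINFO_* offsets):
 *
 *	tsbreg = tsbinfo->tsb_pa | (tsbinfo->tsb_szc & TSB_SOFTSZ_MASK);
 */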


/*
 * Load TSB base register into a dedicated scratchpad register
 * for private contexts.  This register contains utsb_pabase in
 * bits 63:13, and the TSB size code in bits 2:0.
 * (The shared-context variant, SET_UTSBREG_SHCTX, follows below.)
 *
 * In:
 *   tsbreg = value to load (ro)
 *   regnum = constant or register
 *   tmp1 = scratch register
 * Out:
 *   Specified scratchpad register updated
 *
 */
#define	SET_UTSBREG(regnum, tsbreg, tmp1)				\
	mov	regnum, tmp1;						\
	stxa	tsbreg, [tmp1]ASI_SCRATCHPAD	/* save tsbreg */

/*
 * Load TSB base register to TSBMISS area for shared contexts.
 * This register contains utsb_pabase in bits 63:13, and the TSB size
 * code in bits 2:0.
 *
 * In:
 *   tsbmiss = pointer to tsbmiss area
 *   tsbmissoffset = offset to right tsb pointer
 *   tsbreg = value to load (ro)
 * Out:
 *   Specified tsbmiss area updated
 *
 */
#define	SET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg)		\
	stx	tsbreg, [tsbmiss + tsbmissoffset]	/* save tsbreg */

/*
 * Get TSB base register from the scratchpad for
 * private contexts
 *
 * In:
 *   regnum = constant or register
 *   tsbreg = scratch
 * Out:
 *   tsbreg = tsbreg from the specified scratchpad register
 */
#define	GET_UTSBREG(regnum, tsbreg)					\
	mov	regnum, tsbreg;						\
	ldxa	[tsbreg]ASI_SCRATCHPAD, tsbreg

/*
 * Get TSB base register from the TSBMISS area for
 * shared contexts
 *
 * In:
 *   tsbmiss = pointer to tsbmiss area
 *   tsbmissoffset = offset to right tsb pointer
 *   tsbreg = scratch
 * Out:
 *   tsbreg = tsbreg from the specified tsbmiss area
 */
#define	GET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg)		\
	ldx	[tsbmiss + tsbmissoffset], tsbreg


/*
 * Get the location of the TSB entry in the first TSB to probe
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe, tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 1st TSB
 */

#define	GET_1ST_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	/* BEGIN CSTYLED */						\
	mov	SCRATCHPAD_UTSBREG1, tmp1				;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tsbe	/* get tsbreg */	;\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2	/* tmp2=szc */		;\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe	/* tsbbase */		;\
	mov	TSB_ENTRIES(0), tmp1	/* nentries in TSB size 0 */	;\
	sllx	tmp1, tmp2, tmp1	/* tmp1 = nentries in TSB */	;\
	sub	tmp1, 1, tmp1		/* mask = nentries - 1 */	;\
	srlx	tagacc, MMU_PAGESHIFT, tmp2 				;\
	and	tmp2, tmp1, tmp1	/* tsbent = virtpage & mask */	;\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1	/* entry num --> ptr */	;\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */ ;\
	/* END CSTYLED */
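
/*
 * The TSB indexing above, sketched in C (the 2nd/4th TSB variants below
 * are identical except that they index by MMU_PAGESHIFT4M):
 *
 *	szc  = tsbreg & TSB_SOFTSZ_MASK;
 *	base = tsbreg & ~TSB_SOFTSZ_MASK;
 *	nentries = TSB_ENTRIES(0) << szc;
 *	tsbe = base + (((tagacc >> MMU_PAGESHIFT) & (nentries - 1))
 *	    << TSB_ENTRY_SHIFT);
 */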


/*
 * Will probe the first TSB, and if it finds a match, will insert it
 * into the TLB and retry.
 *
 * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
 * vpg_4m = 4M virtual page number for tag matching  (in, ro)
 * label = where to branch to if this is a miss (text)
 * %asi = atomic ASI to use for the TSB access
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = location tag will be retrieved into from TSB (out)
 * g5 = location data(tte) will be retrieved into from TSB (out)
 */
#define	PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label)	/* g4/g5 clobbered */	\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */
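
/*
 * The probe above, sketched in C: the quad load pulls tag and data in
 * one atomic access, a negative data word means a valid TTE (bit 63
 * set), and everything else falls through to the miss label.
 *
 *	tag = tsbe_ptr[0]; data = tsbe_ptr[1];
 *	if (tag == vpg_4m && (int64_t)data < 0) {
 *		DTLB_STUFF(data);
 *		retry;
 *	}
 */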


/*
 * Same as above, except that if the TTE doesn't have the execute
 * bit set, it branches directly to exec_fault.
 */
#define	PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */


/*
 * Get the location in the 2nd TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings.
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe, tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 2nd TSB
 */

#define	GET_2ND_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	mov	SCRATCHPAD_UTSBREG2, tmp1;				\
	ldxa	[tmp1]ASI_SCRATCHPAD, tsbe;	/* get tsbreg */	\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2;	/* tmp2=szc */		\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe;	/* tsbbase */		\
	mov	TSB_ENTRIES(0), tmp1;	/* nentries in TSB size 0 */	\
	sllx	tmp1, tmp2, tmp1;	/* tmp1 = nentries in TSB */	\
	sub	tmp1, 1, tmp1;		/* mask = nentries - 1 */	\
	srlx	tagacc, MMU_PAGESHIFT4M, tmp2; 				\
	and	tmp2, tmp1, tmp1;	/* tsbent = virtpage & mask */	\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1;	/* entry num --> ptr */	\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */


/*
 * vpg_4m = 4M virtual page number for tag matching (in)
 * tsbe_ptr = precomputed second TSB entry pointer (in)
 * label = label to use to make branch targets unique (text)
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = tag portion of TSBE (out)
 * g5 = data portion of TSBE (out)
 */
#define	PROBE_2ND_DTSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4  /* g4 = tag, g5 = data */ ;\
	/* since we are looking at 2nd tsb, if it's valid, it must be 4M */ ;\
	cmp	%g4, vpg_4m						;\
	bne,pn	%xcc, label/**/1					;\
	  nop								;\
	brgez,pn %g5, label/**/1					;\
	  nop								;\
	mov	tsbe_ptr, %g1		/* trace_tsbhit wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */


/*
 * Same as above, with the following additions:
 * If the TTE found is not executable, branch directly
 * to exec_fault.  If a TSB miss, branch to TSB miss handler.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  nop								;\
	brgez,pn %g5, sfmmu_tsb_miss_tt					;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */


/*
 * Get the location in the 3rd TSB of the tsbe for this fault.
 * The 3rd TSB corresponds to the shared context, and is used
 * for 8K - 512K pages.
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe   = TSB base register
 *   tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 3rd TSB
 */
#define	GET_3RD_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)		\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2;	/* tmp2=szc */		\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe;	/* tsbbase */		\
	mov	TSB_ENTRIES(0), tmp1;	/* nentries in TSB size 0 */	\
	sllx	tmp1, tmp2, tmp1;	/* tmp1 = nentries in TSB */	\
	sub	tmp1, 1, tmp1;		/* mask = nentries - 1 */	\
	srlx	tagacc, MMU_PAGESHIFT, tmp2; 				\
	and	tmp2, tmp1, tmp1;	/* tsbent = virtpage & mask */	\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1;	/* entry num --> ptr */	\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */


/*
 * Get the location in the 4th TSB of the tsbe for this fault.
 * The 4th TSB is for the shared context. It is used for 4M - 256M pages.
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe   = TSB base register
 *   tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 4th TSB
 */
#define	GET_4TH_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)		\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2;	/* tmp2=szc */		\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe;	/* tsbbase */		\
	mov	TSB_ENTRIES(0), tmp1;	/* nentries in TSB size 0 */	\
	sllx	tmp1, tmp2, tmp1;	/* tmp1 = nentries in TSB */	\
	sub	tmp1, 1, tmp1;		/* mask = nentries - 1 */	\
	srlx	tagacc, MMU_PAGESHIFT4M, tmp2; 				\
	and	tmp2, tmp1, tmp1;	/* tsbent = virtpage & mask */	\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1;	/* entry num --> ptr */	\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */

/*
 * Copy the sfmmu_region_map or scd_region_map to the tsbmiss
 * shmermap or scd_shmermap.  Called from sfmmu_load_mmustate.
 */
#define	SET_REGION_MAP(rgn_map, tsbmiss_map, cnt, tmp, label)		\
	/* BEGIN CSTYLED */						\
label:									;\
	ldx	[rgn_map], tmp						;\
	dec	cnt							;\
	add	rgn_map, CLONGSIZE, rgn_map				;\
	stx	tmp, [tsbmiss_map]					;\
	brnz,pt	cnt, label						;\
	  add	tsbmiss_map, CLONGSIZE, tsbmiss_map			\
	/* END CSTYLED */

/*
 * If there is no SCD, then zero the tsbmiss scd_shmermap.
 * Called from sfmmu_load_mmustate.
 */
#define	ZERO_REGION_MAP(tsbmiss_map, cnt, label)			\
	/* BEGIN CSTYLED */						\
label:									;\
	dec	cnt							;\
	stx	%g0, [tsbmiss_map]					;\
	brnz,pt	cnt, label						;\
	  add	tsbmiss_map, CLONGSIZE, tsbmiss_map			\
	/* END CSTYLED */

/*
 * Set hmemisc to 1 if the shared hme is also part of an scd.
 * In:
 *   tsbarea = tsbmiss area (not clobbered)
 *   hmeblkpa = hmeblkpa + hmentoff + SFHME_TTE (not clobbered)
 *   hmentoff = hmentoff + SFHME_TTE = tte offset (clobbered)
 * Out:
 *   use_shctx = 1 if shme is in scd and 0 otherwise
 */
#define	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hmentoff, use_shctx)		      \
	/* BEGIN CSTYLED */						      \
	sub	hmeblkpa, hmentoff, hmentoff	/* hmentoff = hmeblkpa */    ;\
	add	hmentoff, HMEBLK_TAG, hmentoff				     ;\
	ldxa	[hmentoff]ASI_MEM, hmentoff	/* read 1st part of tag */   ;\
	and	hmentoff, HTAG_RID_MASK, hmentoff	/* mask off rid */   ;\
	and	hmentoff, BT_ULMASK, use_shctx	/* mask bit index */	     ;\
	srlx	hmentoff, BT_ULSHIFT, hmentoff	/* extract word */	     ;\
	sllx	hmentoff, CLONGSHIFT, hmentoff	/* index */		     ;\
	add	tsbarea, hmentoff, hmentoff		/* add to tsbarea */ ;\
	ldx	[hmentoff + TSBMISS_SCDSHMERMAP], hmentoff	/* scdrgn */ ;\
	srlx	hmentoff, use_shctx, use_shctx				     ;\
	and	use_shctx, 0x1, use_shctx      				      \
	/* END CSTYLED */
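
/*
 * GET_SCDSHMERMAP is a bit test against the scd_shmermap bitmap; in C
 * (sketch, with rid the region id recovered from the hme block tag):
 *
 *	rid  = hmeblk_tag & HTAG_RID_MASK;
 *	word = scd_shmermap[rid >> BT_ULSHIFT];
 *	use_shctx = (word >> (rid & BT_ULMASK)) & 1;
 */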

/*
 * 1. Get ctx1. The traptype is supplied by the caller.
 * 2. If an iTSB miss, store ctx1 in MMFSA_I_CTX.
 * 3. If a dTSB miss, store ctx1 in MMFSA_D_CTX.
 * 4. Thus the [D|I]TLB_STUFF will work as expected.
 */
#define	SAVE_CTX1(traptype, ctx1, tmp, label)				\
	/* BEGIN CSTYLED */						\
	mov	MMU_SCONTEXT1, tmp					;\
	ldxa	[tmp]ASI_MMU_CTX, ctx1					;\
	MMU_FAULT_STATUS_AREA(tmp)					;\
	cmp     traptype, FAST_IMMU_MISS_TT				;\
	be,a,pn %icc, label						;\
	  stx	ctx1, [tmp + MMFSA_I_CTX] 				;\
	cmp     traptype, T_INSTR_MMU_MISS				;\
	be,a,pn %icc, label						;\
	  stx	ctx1, [tmp + MMFSA_I_CTX]				;\
	stx	ctx1, [tmp + MMFSA_D_CTX]				;\
label:
	/* END CSTYLED */
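
/*
 * In C terms (sketch; asi_read() is shorthand for the ldxa):
 *
 *	ctx1 = asi_read(ASI_MMU_CTX, MMU_SCONTEXT1);
 *	if (traptype == FAST_IMMU_MISS_TT || traptype == T_INSTR_MMU_MISS)
 *		mmfsa->i_ctx = ctx1;
 *	else
 *		mmfsa->d_ctx = ctx1;
 */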

#endif /* _ASM */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_MACH_SFMMU_H */