/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the contents of the sun reference mmu (sfmmu)
 * specific hat data structures and the sfmmu specific hat procedures.
 * The machine independent interface is described in <vm/hat.h>.
 */

#ifndef _VM_MACH_SFMMU_H
#define	_VM_MACH_SFMMU_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/spitregs.h>
#include <sys/opl_olympus_regs.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * On sun4u platforms, user TSBs are accessed via virtual address by default.
 * Platforms that support ASI_SCRATCHPAD registers can define UTSB_PHYS in the
 * platform Makefile to access user TSBs via physical address but must also
 * designate one ASI_SCRATCHPAD register to hold the second user TSB.  To
 * designate the user TSB scratchpad register, platforms must provide a
 * definition for SCRATCHPAD_UTSBREG below.
 *
 * Platforms that use UTSB_PHYS do not allocate 2 locked TLB entries to access
 * the user TSBs.
 */
#if defined(UTSB_PHYS)

#if defined(_OPL)
#define	SCRATCHPAD_UTSBREG	OPL_SCRATCHPAD_UTSBREG4
#else
#error "Compiling UTSB_PHYS but no SCRATCHPAD_UTSBREG specified"
#endif

#endif /* UTSB_PHYS */


#ifdef _ASM

/*
 * This macro is used to set the private secondary context register in
 * sfmmu_alloc_ctx().
 * Input:
 * cnum : the context number to set
 * arg2 : unused
 */
#define	SET_SECCTX(cnum, arg2, tmp1, tmp2)			\
	mov	MMU_SCONTEXT, tmp1;				\
	sethi	%hi(FLUSH_ADDR), tmp2;				\
	stxa	cnum, [tmp1]ASI_MMU_CTX;			\
	flush	tmp2

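/*
 * Illustrative usage sketch (not part of the original source): a caller
 * with the new context number in %o0 could install it with
 *
 *	SET_SECCTX(%o0, %g0, %o1, %o2)
 *
 * which stores cnum to the secondary context register via ASI_MMU_CTX and
 * then flushes FLUSH_ADDR to make the update visible.  The register
 * choices shown here are hypothetical.
 */
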
/*
 * This macro is used in the MMU code to check if TL should be lowered from
 * 2 to 1 to pop trapstat's state.  See the block comment in trapstat.c
 * for details.
 */

#define	TSTAT_CHECK_TL1(label, scr1, scr2)			\
	rdpr	%tpc, scr1;					\
	sethi	%hi(KERNELBASE), scr2;				\
	or	scr2, %lo(KERNELBASE), scr2;			\
	cmp	scr1, scr2;					\
	bgeu	%xcc, 9f;					\
	nop;							\
	ba	label;						\
	wrpr	%g0, 1, %tl;					\
9:


/*
 * The following macros allow us to share the majority of the
 * SFMMU code between sun4u and sun4v platforms.
 */

#define	SETUP_TSB_ASI(qlp, tmp)					\
	movrz	qlp, ASI_N, tmp;				\
	movrnz	qlp, ASI_MEM, tmp;				\
	mov	tmp, %asi

/*
 * Macro to switch to the alternate global registers on sun4u platforms
 * (not applicable to sun4v platforms)
 */
#define	USE_ALTERNATE_GLOBALS(scr)				\
	rdpr	%pstate, scr;					\
	wrpr	scr, PSTATE_MG | PSTATE_AG, %pstate

/*
 * Macro to set %gl register value on sun4v platforms
 * (not applicable to sun4u platforms)
 */
#define	SET_GL_REG(val)

/*
 * Get MMU data tag access register value
 *
 * In:
 *   tagacc, scr1 = scratch registers
 * Out:
 *   tagacc = MMU data tag access register value
 */
#define	GET_MMU_D_TAGACC(tagacc, scr1)				\
	mov	MMU_TAG_ACCESS, scr1;				\
	ldxa	[scr1]ASI_DMMU, tagacc

/*
 * Get MMU data tag target register
 *
 * In:
 *   ttarget, scr1 = scratch registers (scr1 is unused)
 * Out:
 *   ttarget = MMU data tag target register value
 */
#define	GET_MMU_D_TTARGET(ttarget, scr1)			\
	ldxa	[%g0]ASI_DMMU, ttarget

/*
 * Get MMU data/instruction tag access register values
 *
 * In:
 *   dtagacc, itagacc, scr1, scr2 = scratch registers (scr2 is unused)
 * Out:
 *   dtagacc = MMU data tag access register value
 *   itagacc = MMU instruction tag access register value
 */
#define	GET_MMU_BOTH_TAGACC(dtagacc, itagacc, scr1, scr2)	\
	mov	MMU_TAG_ACCESS, scr1;				\
	ldxa	[scr1]ASI_DMMU, dtagacc;			\
	ldxa	[scr1]ASI_IMMU, itagacc

/*
 * Get MMU data fault address from the tag access register
 *
 * In:
 *   daddr, scr1 = scratch registers
 * Out:
 *   daddr = MMU data fault address
 */
#define	GET_MMU_D_ADDR(daddr, scr1)				\
	mov	MMU_TAG_ACCESS, scr1;				\
	ldxa	[scr1]ASI_DMMU, daddr;				\
	set	TAGACC_CTX_MASK, scr1;				\
	andn	daddr, scr1, daddr

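/*
 * A note on the masking above (our reading of the layout, not original
 * text): the D-MMU tag access register holds the faulting VA in its upper
 * bits and the context number in its low bits, so clearing the bits under
 * TAGACC_CTX_MASK leaves only the page-aligned fault address in daddr.
 */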

/*
 * Load ITLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers (not used)
 */
#define	ITLB_STUFF(tte, scr1, scr2, scr3, scr4)			\
	stxa	tte, [%g0]ASI_ITLB_IN

/*
 * Load DTLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers (not used)
 */
#define	DTLB_STUFF(tte, scr1, scr2, scr3, scr4)			\
	stxa	tte, [%g0]ASI_DTLB_IN


/*
 * Returns PFN given the TTE and vaddr
 *
 * In:
 *   tte = reg containing tte
 *   vaddr = reg containing vaddr
 *   label = temporary label used for branching within the macro
 *   scr1, scr2, scr3 = scratch registers
 * Out:
 *   tte = PFN value
 */
#define	TTETOPFN(tte, vaddr, label, scr1, scr2, scr3)			\
	srlx	tte, TTE_SZ_SHFT, scr1;					\
	and	scr1, TTE_SZ_BITS, scr1;	/* scr1 = tte_size */	\
	srlx	tte, TTE_SZ2_SHFT, scr3;				\
	and	scr3, TTE_SZ2_BITS, scr3;	/* scr3 = tte_size2 */	\
	or	scr1, scr3, scr1;					\
	sllx	scr1, 1, scr2;						\
	add	scr2, scr1, scr2;		/* mulx 3 */		\
	sllx	tte, TTE_PA_LSHIFT, tte;				\
	add	scr2, MMU_PAGESHIFT + TTE_PA_LSHIFT, scr3;		\
	/* BEGIN CSTYLED */						\
	brz,pt	scr2, label/**/1;					\
	  srlx	tte, scr3, tte;						\
	/* END CSTYLED */						\
	sllx	tte, scr2, tte;						\
	set	1, scr1;						\
	add	scr2, MMU_PAGESHIFT, scr3;				\
	sllx	scr1, scr3, scr1;					\
	sub	scr1, 1, scr1;	/* scr1 = TTE_PAGE_OFFSET(ttesz) */	\
	and	vaddr, scr1, scr2;					\
	srln	scr2, MMU_PAGESHIFT, scr2;				\
	or	tte, scr2, tte;						\
	/* CSTYLED */							\
label/**/1:

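/*
 * Worked example of the arithmetic above (illustrative only): scr1 holds
 * the combined TTE size code, and scr2 = 3 * scr1 because each size step
 * is 8 times the previous page size (3 more bits of page offset).  The
 * PFN is recovered by shifting the PA field down by
 * MMU_PAGESHIFT + 3 * ttesz and, for large pages (ttesz > 0), merging
 * back the page-number bits that come from vaddr.  For size code 0 (8K)
 * scr2 is zero and the brz above skips the vaddr merge entirely.
 */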

/*
 * TTE_SET_REF_ML is a macro that updates the reference bit if it is
 * not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 */

#define	TTE_SET_REF_ML(tte, ttepa, tteva, tsbarea, tmp1, label)		\
	/* BEGIN CSTYLED */						\
	/* check reference bit */					\
	andcc	tte, TTE_REF_INT, %g0;					\
	bnz,pt	%xcc, label/**/4;	/* if ref bit set-skip ahead */	\
	  nop;								\
	GET_CPU_IMPL(tmp1);						\
	cmp	tmp1, SPITFIRE_IMPL;					\
	blt	%icc, label/**/2;	/* skip flush if FJ-OPL cpus */	\
	cmp	tmp1, CHEETAH_IMPL;					\
	bl,a	%icc, label/**/1;					\
	  lduh	[tsbarea + TSBMISS_DMASK], tmp1; /* delay: dcache mask */ \
	stxa	%g0, [ttepa]ASI_DC_INVAL; /* flush line from dcache */	\
	membar	#Sync;							\
	ba	label/**/2;						\
label/**/1:								\
	and	tteva, tmp1, tmp1;					\
	stxa	%g0, [tmp1]ASI_DC_TAG; /* flush line from dcache */	\
	membar	#Sync;							\
label/**/2:								\
	or	tte, TTE_REF_INT, tmp1;					\
	casxa	[ttepa]ASI_MEM, tte, tmp1; 	/* update ref bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/2;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_REF_INT, tte;					\
label/**/4:								\
	/* END CSTYLED */

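/*
 * The loop at label2 above is a compare-and-swap retry; in C-like
 * pseudocode (a sketch, not original source):
 *
 *	do {
 *		old = *ttepa;
 *		new = old | TTE_REF_INT;
 *	} while (casxa(ttepa, old, new) != old);
 *
 * The preceding dcache flush matters on CPUs where the casxa through
 * ASI_MEM updates memory behind a virtually indexed dcache line holding
 * the tte (our reading of the flush comments above).
 */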

/*
 * TTE_SET_REFMOD_ML is a macro that updates the reference and modify bits
 * if not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 * exitlabel = label to jump to if the write permission bit is not set
 */

#define	TTE_SET_REFMOD_ML(tte, ttepa, tteva, tsbarea, tmp1, label,	\
	exitlabel)							\
	/* BEGIN CSTYLED */						\
	/* check write permission bit */				\
	andcc	tte, TTE_WRPRM_INT, %g0;				\
	bz,pn	%xcc, exitlabel;	/* exit if wr_perm not set */	\
	  nop;								\
	andcc	tte, TTE_HWWR_INT, %g0;					\
	bnz,pn	%xcc, label/**/4;	/* nothing to do */		\
	  nop;								\
	GET_CPU_IMPL(tmp1);						\
	cmp	tmp1, SPITFIRE_IMPL;					\
	blt	%icc, label/**/2;	/* skip flush if FJ-OPL cpus */	\
	cmp	tmp1, CHEETAH_IMPL;					\
	bl,a	%icc, label/**/1;					\
	  lduh	[tsbarea + TSBMISS_DMASK], tmp1; /* delay: dcache mask */ \
	stxa	%g0, [ttepa]ASI_DC_INVAL; /* flush line from dcache */	\
	membar	#Sync;							\
	ba	label/**/2;						\
label/**/1:								\
	and	tteva, tmp1, tmp1;					\
	stxa	%g0, [tmp1]ASI_DC_TAG; /* flush line from dcache */	\
	membar	#Sync;							\
label/**/2:								\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tmp1;			\
	casxa	[ttepa]ASI_MEM, tte, tmp1; /* update ref/mod bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/2;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tte;			\
label/**/4:								\
	/* END CSTYLED */


#ifndef UTSB_PHYS

/*
 * Synthesize TSB base register contents for a process with
 * a single TSB.
 *
 * We patch the virtual address mask in at runtime since the
 * number of significant virtual address bits in the TSB VA
 * can vary depending upon the TSB slab size being used on the
 * machine.
 *
 * In:
 *   tsbinfo = TSB info pointer (ro)
 *   vabase = value of utsb_vabase (ro)
 * Out:
 *   tsbreg = value to program into TSB base register
 */

#define	MAKE_TSBREG(tsbreg, tsbinfo, vabase, tmp1, tmp2, label)		\
	/* BEGIN CSTYLED */						\
	ldx	[tsbinfo + TSBINFO_VADDR], tmp1;			\
	.global	label/**/_tsbreg_vamask					;\
label/**/_tsbreg_vamask:						\
	or	%g0, RUNTIME_PATCH, tsbreg;				\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp2;			\
	sllx	tsbreg, TSBREG_VAMASK_SHIFT, tsbreg;			\
	or	vabase, tmp2, tmp2;					\
	and	tmp1, tsbreg, tsbreg;					\
	or	tsbreg, tmp2, tsbreg;					\
	/* END CSTYLED */


/*
 * Synthesize TSB base register contents for a process with
 * two TSBs.  See hat_sfmmu.h for the layout of the TSB base
 * register in this case.
 *
 * In:
 *   tsb1 = pointer to first TSB info (ro)
 *   tsb2 = pointer to second TSB info (ro)
 * Out:
 *   tsbreg = value to program into TSB base register
 */
#define	MAKE_TSBREG_SECTSB(tsbreg, tsb1, tsb2, tmp1, tmp2, tmp3, label)	\
	/* BEGIN CSTYLED */						\
	set	TSBREG_MSB_CONST, tmp3					;\
	sllx	tmp3, TSBREG_MSB_SHIFT, tsbreg				;\
	.global	label/**/_tsbreg_vamask					;\
label/**/_tsbreg_vamask:						;\
	or	%g0, RUNTIME_PATCH, tmp3				;\
	sll	tmp3, TSBREG_VAMASK_SHIFT, tmp3				;\
	ldx	[tsb1 + TSBINFO_VADDR], tmp1				;\
	ldx	[tsb2 + TSBINFO_VADDR], tmp2				;\
	and	tmp1, tmp3, tmp1					;\
	and	tmp2, tmp3, tmp2					;\
	sllx	tmp2, TSBREG_SECTSB_MKSHIFT, tmp2			;\
	or	tmp1, tmp2, tmp3					;\
	or	tsbreg, tmp3, tsbreg					;\
	lduh	[tsb1 + TSBINFO_SZCODE], tmp1				;\
	lduh	[tsb2 + TSBINFO_SZCODE], tmp2				;\
	and	tmp1, TSB_SOFTSZ_MASK, tmp1				;\
	and	tmp2, TSB_SOFTSZ_MASK, tmp2				;\
	sllx	tmp2, TSBREG_SECSZ_SHIFT, tmp2				;\
	or	tmp1, tmp2, tmp3					;\
	or	tsbreg, tmp3, tsbreg					;\
	/* END CSTYLED */


/*
 * Load the locked TSB TLB entry.
 *
 * In:
 *   tsbinfo = tsb_info pointer as va (ro)
 *   tteidx = shifted index into TLB to load the locked entry (ro)
 *   va = virtual address at which to load the locked TSB entry (ro)
 * Out:
 *   none
 * Scratch:
 *   tmp
 */
#define	LOAD_TSBTTE(tsbinfo, tteidx, va, tmp)				\
	mov	MMU_TAG_ACCESS, tmp;					\
	stxa	va, [tmp]ASI_DMMU;		/* set tag access */	\
	membar	#Sync;							\
	ldx	[tsbinfo + TSBINFO_TTE], tmp;	/* fetch locked tte */	\
	stxa	tmp, [tteidx]ASI_DTLB_ACCESS;	/* load locked tte */	\
	membar	#Sync


/*
 * In the current implementation, TSBs usually come from physically
 * contiguous chunks of memory up to 4MB in size, but 8K TSBs may be
 * allocated from 8K chunks of memory under certain conditions.  To
 * prevent aliasing in the virtual address cache when the TSB slab is
 * 8K in size we must align the reserved (TL>0) TSB virtual address to
 * have the same low-order bits as the kernel (TL=0) TSB virtual address,
 * and map 8K TSBs with an 8K TTE.  In cases where the TSB reserved VA
 * range is smaller than the assumed 4M we will patch the shift at
 * runtime; otherwise we leave it alone (which is why the RUNTIME_PATCH
 * constant doesn't appear below).
 *
 * In:
 *   tsbinfo (ro)
 *   resva: reserved VA base for this TSB
 * Out:
 *   resva: corrected VA for this TSB
 */
#define	RESV_OFFSET(tsbinfo, resva, tmp1, label)			\
	/* BEGIN CSTYLED */						\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1			;\
	brgz,pn	tmp1, label/**/9					;\
	  nop								;\
	ldx	[tsbinfo + TSBINFO_VADDR], tmp1				;\
	.global	label/**/_resv_offset					;\
label/**/_resv_offset:							;\
	sllx	tmp1, (64 - MMU_PAGESHIFT4M), tmp1			;\
	srlx	tmp1, (64 - MMU_PAGESHIFT4M), tmp1			;\
	or	tmp1, resva, resva					;\
label/**/9:								\
	/* END CSTYLED */

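/*
 * Illustrative note on the shift pair above (not original text): shifting
 * tmp1 left then right by (64 - MMU_PAGESHIFT4M) keeps only the low
 * MMU_PAGESHIFT4M bits of the TSB's kernel VA, i.e. its offset within a
 * 4M slab.  OR-ing that offset into resva gives the reserved VA the same
 * low-order bits, which prevents virtual-cache aliasing for the size
 * code 0 TSBs that fall through the brgz test into the adjustment.
 */
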
/*
 * Determine the pointer to the entry in the first TSB to probe given
 * the 8K TSB pointer register contents.
 *
 * In:
 *   tsbp8k = 8K TSB pointer register (ro)
 *   tmp = scratch register
 *   label = label for hot patching of utsb_vabase
 *
 * Out: tsbe_ptr = TSB entry address
 *
 * Note: This function is patched at runtime for performance reasons.
 *	 Any changes here require sfmmu_patch_utsb to be fixed as well.
 */

#define	GET_1ST_TSBE_PTR(tsbp8k, tsbe_ptr, tmp, label)			\
	/* BEGIN CSTYLED */						\
label/**/_get_1st_tsbe_ptr:						;\
	RUNTIME_PATCH_SETX(tsbe_ptr, tmp)				;\
	/* tsbeptr = contents of utsb_vabase */				;\
	/* clear upper bits leaving just bits 21:0 of TSB ptr. */	;\
	sllx	tsbp8k, TSBREG_FIRTSB_SHIFT, tmp			;\
	/* finish clear */						;\
	srlx	tmp, TSBREG_FIRTSB_SHIFT, tmp				;\
	/* or-in bits 41:22 of the VA to form the real pointer. */	;\
	or	tsbe_ptr, tmp, tsbe_ptr					\
	/* END CSTYLED */

/*
 * Determine the base address of the second TSB given the 8K TSB
 * pointer register contents.
 *
 * In:
 *   tsbp8k = 8K TSB pointer register (ro)
 *   tmp = scratch register
 *   label = label for hot patching of utsb_vabase
 *
 * Out:
 *   tsbbase = TSB base address
 *
 * Note: This function is patched at runtime for performance reasons.
 *	 Any changes here require sfmmu_patch_utsb to be fixed as well.
 */

#define	GET_2ND_TSB_BASE(tsbp8k, tsbbase, tmp, label)			\
	/* BEGIN CSTYLED */						\
label/**/_get_2nd_tsb_base:						;\
	RUNTIME_PATCH_SETX(tsbbase, tmp)				;\
	/* tsbbase = contents of utsb4m_vabase */			;\
	/* clear upper bits leaving just bits 21:xx of TSB addr. */	;\
	sllx	tsbp8k, TSBREG_SECTSB_LSHIFT, tmp			;\
	/* clear lower bits leaving just 21:13 in 8:0 */		;\
	srlx	tmp, (TSBREG_SECTSB_RSHIFT + MMU_PAGESHIFT), tmp	;\
	/* adjust TSB offset to bits 21:13 */				;\
	sllx	tmp, MMU_PAGESHIFT, tmp					;\
	or	tsbbase, tmp, tsbbase					;\
	/* END CSTYLED */

/*
 * Determine the size code of the second TSB given the 8K TSB
 * pointer register contents.
 *
 * In:
 *   tsbp8k = 8K TSB pointer register (ro)
 * Out:
 *   size = TSB size code
 */

#define	GET_2ND_TSB_SIZE(tsbp8k, size)					\
	srlx	tsbp8k, TSBREG_SECSZ_SHIFT, size;			\
	and	size, TSB_SOFTSZ_MASK, size

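/*
 * Example decode (illustrative): with a TSB base register built by
 * MAKE_TSBREG_SECTSB above, a value of 2 in the TSBREG_SECSZ_SHIFT field
 * yields size = 2 here, i.e. a second TSB with four times the entries of
 * a size-code-0 TSB (each size code doubles the TSB length).
 */
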
/*
 * Get the location in the 2nd TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings.
 *
 * In:
 *   tagacc = tag access register (clobbered)
 *   tsbp8k = contents of TSB8K pointer register (ro)
 *   tmp1, tmp2 = scratch registers
 *   label = label at which to patch in reserved TSB 4M VA range
 * Out:
 *   tsbe_ptr = pointer to the tsbe in the 2nd TSB
 */
#define	GET_2ND_TSBE_PTR(tagacc, tsbp8k, tsbe_ptr, tmp1, tmp2, label)	\
	GET_2ND_TSB_BASE(tsbp8k, tsbe_ptr, tmp2, label);		\
	/* tsbe_ptr = TSB base address, tmp2 = junk */			\
	GET_2ND_TSB_SIZE(tsbp8k, tmp1);					\
	/* tmp1 = TSB size code */					\
	GET_TSBE_POINTER(MMU_PAGESHIFT4M, tsbe_ptr, tagacc, tmp1, tmp2)

#endif /* UTSB_PHYS */


#ifdef UTSB_PHYS

/*
 * Synthesize TSB base register contents for a process.
 *
 * In:
 *   tsbinfo = TSB info pointer (ro)
 *   tsbreg, tmp1 = scratch registers
 * Out:
 *   tsbreg = value to program into TSB base register
 */

#define	MAKE_UTSBREG_PHYS(tsbinfo, tsbreg, tmp1)			\
	ldx	[tsbinfo + TSBINFO_PADDR], tsbreg;		\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1;		\
	and	tmp1, TSB_SOFTSZ_MASK, tmp1;			\
	or	tsbreg, tmp1, tsbreg

/*
 * Load TSB base register into a dedicated scratchpad register.
 * This register contains utsb_pabase in bits 63:13, and TSB size
 * code in bits 2:0.
 *
 * In:
 *   tsbreg = value to load (ro)
 *   regnum = constant or register
 *   tmp1 = scratch register
 * Out:
 *   Specified scratchpad register updated
 *
 * Note: If this is enabled on Panther, a membar #Sync is required
 *	 following an ASI store to the scratchpad registers.
 */

#define	SET_UTSBREG(regnum, tsbreg, tmp1)				\
	mov	regnum, tmp1;						\
	stxa	tsbreg, [tmp1]ASI_SCRATCHPAD	/* save tsbreg */

/*
 * Get TSB base register from the scratchpad
 *
 * In:
 *   regnum = constant or register
 *   tsbreg = scratch
 * Out:
 *   tsbreg = tsbreg from the specified scratchpad register
 */

#define	GET_UTSBREG(regnum, tsbreg)					\
	mov	regnum, tsbreg;						\
	ldxa	[tsbreg]ASI_SCRATCHPAD, tsbreg

/*
 * Determine the pointer to the entry in the first TSB to probe given
 * the 8K TSB pointer register contents.
 *
 * In:
 *   tagacc = tag access register
 *   tsbe_ptr = 8K TSB pointer register
 *   tmp1, tmp2 = scratch registers
 *
 * Out: tsbe_ptr = TSB entry address
 *
 * Note: This macro is a nop since the 8K TSB pointer register
 *	 is the entry pointer and does not need to be decoded.
 *	 It is defined to allow for code sharing with sun4v.
 */

#define	GET_1ST_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2)

/*
 * Get the location in the 2nd TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings.
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe = 2nd TSB base register
 *   tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 2nd TSB
 */

#define	GET_2ND_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2;	/* tmp2=szc */		\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe;	/* tsbbase */		\
	mov	TSB_ENTRIES(0), tmp1;	/* nentries in TSB size 0 */	\
	sllx	tmp1, tmp2, tmp1;	/* tmp1 = nentries in TSB */	\
	sub	tmp1, 1, tmp1;		/* mask = nentries - 1 */	\
	srlx	tagacc, MMU_PAGESHIFT4M, tmp2;				\
	and	tmp2, tmp1, tmp1;	/* tsbent = virtpage & mask */	\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1;	/* entry num --> ptr */	\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */

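/*
 * Worked example (illustrative, hypothetical numbers): for size code 0
 * the TSB holds TSB_ENTRIES(0) entries, so the mask computed above is
 * TSB_ENTRIES(0) - 1.  A fault on 4M virtual page number vpn selects
 * entry (vpn & mask), and the final sllx by TSB_ENTRY_SHIFT turns that
 * entry number into a byte offset from the TSB base.
 */
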
/*
 * Read the 2nd TSB base register.  This is not done in GET_2ND_TSBE_PTR as
 * an optimization since the TLB miss trap handler entries have potentially
 * already loaded the 2nd TSB base reg when we invoke GET_2ND_TSBE_PTR.
 *
 * Out:
 *   tsbreg = contents of the 2nd TSB base register
 */
#define	GET_2ND_TSBREG(tsbreg)						\
	GET_UTSBREG(SCRATCHPAD_UTSBREG, tsbreg);

/*
 * Load the 2nd TSB base into the dedicated scratchpad register which
 * is used as a pseudo TSB base register.
 *
 * In:
 *   tsbreg = value to load (ro)
 *   tmp1 = scratch register
 * Out:
 *   SCRATCHPAD_UTSBREG scratchpad register updated
 */
#define	LOAD_2ND_TSBREG(tsbreg, tmp1)					\
	SET_UTSBREG(SCRATCHPAD_UTSBREG, tsbreg, tmp1);

#endif /* UTSB_PHYS */


/*
 * Load TSB base register.  In the single TSB case this register
 * contains utsb_vabase, bits 21:13 of tsbinfo->tsb_va, and the
 * TSB size code in bits 2:0.  See hat_sfmmu.h for the layout in
 * the case where we have multiple TSBs per process.
 *
 * In:
 *   tsbreg = value to load (ro)
 */
#define	LOAD_TSBREG(tsbreg, tmp1, tmp2)					\
	mov	MMU_TSB, tmp1;						\
	sethi	%hi(FLUSH_ADDR), tmp2;					\
	stxa	tsbreg, [tmp1]ASI_DMMU;		/* dtsb reg */		\
	stxa	tsbreg, [tmp1]ASI_IMMU;		/* itsb reg */		\
	flush	tmp2

#ifdef UTSB_PHYS
#define	UTSB_PROBE_ASI	ASI_QUAD_LDD_PHYS
#else
#define	UTSB_PROBE_ASI	ASI_NQUAD_LD
#endif

/*
 * Probe the first TSB; if it finds a match, insert the TTE
 * into the TLB and retry.
 *
 * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
 * vpg_4m = 4M virtual page number for tag matching (in, ro)
 * label = where to branch to if this is a miss (text)
 * UTSB_PROBE_ASI = the ASI used for the TSB access
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = location tag will be retrieved into from TSB (out)
 * g5 = location data(tte) will be retrieved into from TSB (out)
 */
#define	PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label)	/* g4/g5 clobbered */	\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */	;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */

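/*
 * Illustrative sketch (not from the original source): a DTLB miss
 * handler with the precomputed entry pointer in %g1 and the 4M virtual
 * page number in %g2 might probe with
 *
 *	PROBE_1ST_DTSB(%g1, %g2, udtlb_miss)
 *	! control reaches here only on a TSB miss
 *
 * On a tag match the macro stuffs the TTE into the DTLB and retries, so
 * the fall-through path is the miss path.  The label name is hypothetical.
 */
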
/*
 * Same as above, except that if the TTE doesn't have the execute
 * bit set, it branches directly to exec_fault.
 */
#define	PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */	;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */

/*
 * vpg_4m = 4M virtual page number for tag matching (in)
 * tsbe_ptr = precomputed second TSB entry pointer (in)
 * label = label to use to make branch targets unique (text)
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = tag portion of TSBE (out)
 * g5 = data portion of TSBE (out)
 */
#define	PROBE_2ND_DTSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */	;\
	/* since we are looking at 2nd tsb, if it's valid, it must be 4M */ ;\
	cmp	%g4, vpg_4m						;\
	bne,pn	%xcc, label/**/1					;\
	  nop								;\
	mov	tsbe_ptr, %g1		/* trace_tsbhit wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */

/*
 * Macro to get SCD shared hme map on sun4v platforms
 * (not applicable to sun4u platforms)
 */
#define	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)

#ifndef TRAPTRACE
/*
 * Same as above, with the following additions:
 * If the TTE found is not executable, branch directly
 * to exec_fault after checking for ITLB synthesis.
 * If a TSB miss, branch to the TSB miss handler.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */	;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  or	%g0, TTE4M, %g6						;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,a,pn	%icc, label/**/1					;\
	  sllx	%g6, TTE_SZ_SHFT, %g6					;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */ ;\
label/**/1:								;\
	andcc	%g5, TTE_E_SYNTH_INT, %g0				;\
	bz,pn	%icc, exec_fault					;\
	  or	%g5, %g6, %g5						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */
#else /* TRAPTRACE */
/*
 * Same as above, with the TT_TRACE and mov tsbe_ptr, %g1 additions.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */	;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  or	%g0, TTE4M, %g6						;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,a,pn	%icc, label/**/1					;\
	  sllx	%g6, TTE_SZ_SHFT, %g6					;\
	mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */ ;\
label/**/1:								;\
	andcc	%g5, TTE_E_SYNTH_INT, %g0				;\
	bz,pn	%icc, exec_fault					;\
	  mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	or	%g5, %g6, %g5						;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */

#endif /* TRAPTRACE */
#endif /* _ASM */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_MACH_SFMMU_H */