/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the contents of the Sun reference MMU (sfmmu)
 * specific hat data structures and the sfmmu-specific hat procedures.
 * The machine-independent interface is described in <vm/hat.h>.
 */

#ifndef _VM_MACH_SFMMU_H
#define	_VM_MACH_SFMMU_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/spitregs.h>
#include <sys/opl_olympus_regs.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * On sun4u platforms, user TSBs are accessed via virtual address by default.
 * Platforms that support ASI_SCRATCHPAD registers can define UTSB_PHYS in the
 * platform Makefile to access user TSBs via physical address but must also
 * designate one ASI_SCRATCHPAD register to hold the second user TSB.  To
 * designate the user TSB scratchpad register, platforms must provide a
 * definition for SCRATCHPAD_UTSBREG below.
 *
 * Platforms that use UTSB_PHYS do not allocate 2 locked TLB entries to access
 * the user TSBs.
 */
#if defined(UTSB_PHYS)

#if defined(_OPL)
#define	SCRATCHPAD_UTSBREG	OPL_SCRATCHPAD_UTSBREG4
#else
#error "Compiling UTSB_PHYS but no SCRATCHPAD_UTSBREG specified"
#endif

#endif /* UTSB_PHYS */
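
/*
 * For illustration only: enabling this path is a build-time decision.
 * A platform would pass something along the lines of -DUTSB_PHYS from
 * its platform Makefile (the exact make variable used to carry the
 * flag is platform-specific; this is an assumption, not a recipe) and
 * supply a SCRATCHPAD_UTSBREG definition, as the _OPL case above does.
 */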


#ifdef _ASM

/*
 * This macro is used in the MMU code to check if TL should be lowered from
 * 2 to 1 to pop trapstat's state.  See the block comment in trapstat.c
 * for details.
 */

#define	TSTAT_CHECK_TL1(label, scr1, scr2)			\
	rdpr	%tpc, scr1;					\
	sethi	%hi(KERNELBASE), scr2;				\
	or	scr2, %lo(KERNELBASE), scr2;			\
	cmp	scr1, scr2;					\
	bgeu	%xcc, 9f;					\
	nop;							\
	ba	label;						\
	wrpr	%g0, 1, %tl;					\
9:


/*
 * The following macros allow us to share the majority of the
 * SFMMU code between sun4u and sun4v platforms.
 */

#define	SETUP_TSB_ASI(qlp, tmp)					\
	movrz	qlp, ASI_N, tmp;				\
	movrnz	qlp, ASI_MEM, tmp;				\
	mov	tmp, %asi

/*
 * Macro to switch to the alternate globals on sun4u platforms
 * (not applicable to sun4v platforms)
 */
#define	USE_ALTERNATE_GLOBALS(scr)				\
	rdpr	%pstate, scr;					\
	wrpr	scr, PSTATE_MG | PSTATE_AG, %pstate

/*
 * Macro to set %gl register value on sun4v platforms
 * (not applicable to sun4u platforms)
 */
#define	SET_GL_REG(val)

/*
 * Get MMU data tag access register value
 *
 * In:
 *   tagacc, scr1 = scratch registers
 * Out:
 *   tagacc = MMU data tag access register value
 */
#define	GET_MMU_D_TAGACC(tagacc, scr1)				\
	mov	MMU_TAG_ACCESS, scr1;				\
	ldxa	[scr1]ASI_DMMU, tagacc

/*
 * Get MMU data tag target register
 *
 * In:
 *   ttarget, scr1 = scratch registers
 * Out:
 *   ttarget = MMU data tag target register value
 */
#define	GET_MMU_D_TTARGET(ttarget, scr1)			\
	ldxa	[%g0]ASI_DMMU, ttarget

/*
 * Get MMU data/instruction tag access register values
 *
 * In:
 *   dtagacc, itagacc, scr1, scr2 = scratch registers
 * Out:
 *   dtagacc = MMU data tag access register value
 *   itagacc = MMU instruction tag access register value
 */
#define	GET_MMU_BOTH_TAGACC(dtagacc, itagacc, scr1, scr2)	\
	mov	MMU_TAG_ACCESS, scr1;				\
	ldxa	[scr1]ASI_DMMU, dtagacc;			\
	ldxa	[scr1]ASI_IMMU, itagacc

/*
 * Get MMU data fault address from the tag access register
 *
 * In:
 *   daddr, scr1 = scratch registers
 * Out:
 *   daddr = MMU data fault address
 */
#define	GET_MMU_D_ADDR(daddr, scr1)				\
	mov	MMU_TAG_ACCESS, scr1;				\
	ldxa	[scr1]ASI_DMMU, daddr;				\
	set	TAGACC_CTX_MASK, scr1;				\
	andn	daddr, scr1, daddr


/*
 * Load ITLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers (not used)
 */
#define	ITLB_STUFF(tte, scr1, scr2, scr3, scr4)			\
	stxa	tte, [%g0]ASI_ITLB_IN

/*
 * Load DTLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers (not used)
 */
#define	DTLB_STUFF(tte, scr1, scr2, scr3, scr4)			\
	stxa	tte, [%g0]ASI_DTLB_IN


/*
 * Returns PFN given the TTE and vaddr
 *
 * In:
 *   tte = reg containing tte
 *   vaddr = reg containing vaddr
 *   label = temporary label for branching within the macro
 *   scr1, scr2, scr3 = scratch registers
 * Out:
 *   tte = PFN value
 */
#define	TTETOPFN(tte, vaddr, label, scr1, scr2, scr3)			\
	srlx	tte, TTE_SZ_SHFT, scr1;					\
	and	scr1, TTE_SZ_BITS, scr1;	/* scr1 = tte_size */	\
	srlx	tte, TTE_SZ2_SHFT, scr3;				\
	and	scr3, TTE_SZ2_BITS, scr3;	/* scr3 = tte_size2 */	\
	or	scr1, scr3, scr1;					\
	sllx	scr1, 1, scr2;						\
	add	scr2, scr1, scr2;		/* mulx 3 */		\
	sllx	tte, TTE_PA_LSHIFT, tte;				\
	add	scr2, MMU_PAGESHIFT + TTE_PA_LSHIFT, scr3;		\
	/* BEGIN CSTYLED */						\
	brz,pt	scr2, label/**/1;					\
	  srlx	tte, scr3, tte;						\
	/* END CSTYLED */						\
	sllx	tte, scr2, tte;						\
	set	1, scr1;						\
	add	scr2, MMU_PAGESHIFT, scr3;				\
	sllx	scr1, scr3, scr1;					\
	sub	scr1, 1, scr1;	/* scr1 = TTE_PAGE_OFFSET(ttesz) */	\
	and	vaddr, scr1, scr2;					\
	srln	scr2, MMU_PAGESHIFT, scr2;				\
	or	tte, scr2, tte;						\
	/* CSTYLED */							\
label/**/1:
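
/*
 * Sketch of the computation above in C-like pseudocode (illustrative
 * only; the real work happens in assembly at trap level):
 *
 *	ttesz = ((tte >> TTE_SZ_SHFT) & TTE_SZ_BITS) |
 *	    ((tte >> TTE_SZ2_SHFT) & TTE_SZ2_BITS);
 *	shift = 3 * ttesz;	// page sizes grow 8x per size code
 *	pfn = (tte_pa >> (MMU_PAGESHIFT + shift)) << shift;
 *	pfn |= (vaddr & TTE_PAGE_OFFSET(ttesz)) >> MMU_PAGESHIFT;
 */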


/*
 * TTE_SET_REF_ML is a macro that updates the reference bit if it is
 * not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 */

#define	TTE_SET_REF_ML(tte, ttepa, tteva, tsbarea, tmp1, label)		\
	/* BEGIN CSTYLED */						\
	/* check reference bit */					\
	andcc	tte, TTE_REF_INT, %g0;					\
	bnz,pt	%xcc, label/**/4;	/* if ref bit set-skip ahead */	\
	  nop;								\
	GET_CPU_IMPL(tmp1);						\
	cmp	tmp1, SPITFIRE_IMPL;					\
	blt	%icc, label/**/2;	/* skip flush if FJ-OPL cpus */	\
	cmp	tmp1, CHEETAH_IMPL;					\
	bl,a	%icc, label/**/1;					\
	/* update reference bit */					\
	lduh	[tsbarea + TSBMISS_DMASK], tmp1;			\
	stxa	%g0, [ttepa]ASI_DC_INVAL; /* flush line from dcache */	\
	membar	#Sync;							\
	ba	label/**/2;						\
label/**/1:								\
	and	tteva, tmp1, tmp1;					\
	stxa	%g0, [tmp1]ASI_DC_TAG; /* flush line from dcache */	\
	membar	#Sync;							\
label/**/2:								\
	or	tte, TTE_REF_INT, tmp1;					\
	casxa	[ttepa]ASI_MEM, tte, tmp1;	/* update ref bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/2;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_REF_INT, tte;					\
label/**/4:								\
	/* END CSTYLED */
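
/*
 * The update loop above is, in effect (illustrative pseudocode; cas
 * stands in for the casxa hardware compare-and-swap, not a C call):
 *
 *	for (;;) {
 *		new = tte | TTE_REF_INT;
 *		old = cas(ttepa, tte, new);	// swap if unchanged
 *		if (old == tte)
 *			break;			// we set the ref bit
 *		tte = old;	// lost the race; retry with new TTE
 *	}
 *	tte |= TTE_REF_INT;	// reflect the ref bit in our copy
 */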


/*
 * TTE_SET_REFMOD_ML is a macro that updates the reference and modify bits
 * if they are not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 * exitlabel = label to jump to if the write permission bit is not set
 */

#define	TTE_SET_REFMOD_ML(tte, ttepa, tteva, tsbarea, tmp1, label,	\
	exitlabel)							\
	/* BEGIN CSTYLED */						\
	/* check write permission bit */				\
	andcc	tte, TTE_WRPRM_INT, %g0;				\
	bz,pn	%xcc, exitlabel;	/* exit if wr_perm not set */	\
	  nop;								\
	andcc	tte, TTE_HWWR_INT, %g0;					\
	bnz,pn	%xcc, label/**/4;	/* nothing to do */		\
	  nop;								\
	GET_CPU_IMPL(tmp1);						\
	cmp	tmp1, SPITFIRE_IMPL;					\
	blt	%icc, label/**/2;	/* skip flush if FJ-OPL cpus */	\
	cmp	tmp1, CHEETAH_IMPL;					\
	bl,a	%icc, label/**/1;					\
	/* update ref/mod bits */					\
	lduh	[tsbarea + TSBMISS_DMASK], tmp1;			\
	stxa	%g0, [ttepa]ASI_DC_INVAL; /* flush line from dcache */	\
	membar	#Sync;							\
	ba	label/**/2;						\
label/**/1:								\
	and	tteva, tmp1, tmp1;					\
	stxa	%g0, [tmp1]ASI_DC_TAG; /* flush line from dcache */	\
	membar	#Sync;							\
label/**/2:								\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tmp1;			\
	casxa	[ttepa]ASI_MEM, tte, tmp1; /* update ref/mod bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/2;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tte;			\
label/**/4:								\
	/* END CSTYLED */


#ifndef UTSB_PHYS

/*
 * Synthesize TSB base register contents for a process with
 * a single TSB.
 *
 * We patch the virtual address mask in at runtime since the
 * number of significant virtual address bits in the TSB VA
 * can vary depending upon the TSB slab size being used on the
 * machine.
 *
 * In:
 *   tsbinfo = TSB info pointer (ro)
 *   vabase = value of utsb_vabase (ro)
 *   tmp1, tmp2 = scratch registers
 *   label = label for runtime patching of the VA mask
 * Out:
 *   tsbreg = value to program into TSB base register
 */

#define	MAKE_TSBREG(tsbreg, tsbinfo, vabase, tmp1, tmp2, label)		\
	/* BEGIN CSTYLED */						\
	ldx	[tsbinfo + TSBINFO_VADDR], tmp1;			\
	.global	label/**/_tsbreg_vamask					;\
label/**/_tsbreg_vamask:						\
	or	%g0, RUNTIME_PATCH, tsbreg;				\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp2;			\
	sllx	tsbreg, TSBREG_VAMASK_SHIFT, tsbreg;			\
	or	vabase, tmp2, tmp2;					\
	and	tmp1, tsbreg, tsbreg;					\
	or	tsbreg, tmp2, tsbreg;					\
	/* END CSTYLED */
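
/*
 * Sketch of the resulting single-TSB register value (see the
 * LOAD_TSBREG comment below and hat_sfmmu.h for the authoritative
 * layout): the high bits come from utsb_vabase, the middle bits
 * (nominally 21:13, selected by the runtime-patched VA mask) come
 * from tsbinfo->tsb_va, and bits 2:0 hold the TSB size code.
 */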


/*
 * Synthesize TSB base register contents for a process with
 * two TSBs.  See hat_sfmmu.h for the layout of the TSB base
 * register in this case.
 *
 * In:
 *   tsb1 = pointer to first TSB info (ro)
 *   tsb2 = pointer to second TSB info (ro)
 *   tmp1, tmp2, tmp3 = scratch registers
 *   label = label for runtime patching of the VA mask
 * Out:
 *   tsbreg = value to program into TSB base register
 */
#define	MAKE_TSBREG_SECTSB(tsbreg, tsb1, tsb2, tmp1, tmp2, tmp3, label)	\
	/* BEGIN CSTYLED */						\
	set	TSBREG_MSB_CONST, tmp3					;\
	sllx	tmp3, TSBREG_MSB_SHIFT, tsbreg				;\
	.global	label/**/_tsbreg_vamask					;\
label/**/_tsbreg_vamask:						;\
	or	%g0, RUNTIME_PATCH, tmp3				;\
	sll	tmp3, TSBREG_VAMASK_SHIFT, tmp3				;\
	ldx	[tsb1 + TSBINFO_VADDR], tmp1				;\
	ldx	[tsb2 + TSBINFO_VADDR], tmp2				;\
	and	tmp1, tmp3, tmp1					;\
	and	tmp2, tmp3, tmp2					;\
	sllx	tmp2, TSBREG_SECTSB_MKSHIFT, tmp2			;\
	or	tmp1, tmp2, tmp3					;\
	or	tsbreg, tmp3, tsbreg					;\
	lduh	[tsb1 + TSBINFO_SZCODE], tmp1				;\
	lduh	[tsb2 + TSBINFO_SZCODE], tmp2				;\
	and	tmp1, TSB_SOFTSZ_MASK, tmp1				;\
	and	tmp2, TSB_SOFTSZ_MASK, tmp2				;\
	sllx	tmp2, TSBREG_SECSZ_SHIFT, tmp2				;\
	or	tmp1, tmp2, tmp3					;\
	or	tsbreg, tmp3, tsbreg					;\
	/* END CSTYLED */


/*
 * Load the locked TSB TLB entry.
 *
 * In:
 *   tsbinfo = tsb_info pointer as va (ro)
 *   tteidx = shifted index into TLB to load the locked entry (ro)
 *   va = virtual address at which to load the locked TSB entry (ro)
 * Out:
 *   none (the TLB entry is loaded as a side effect)
 * Scratch:
 *   tmp
 */
#define	LOAD_TSBTTE(tsbinfo, tteidx, va, tmp)				\
	mov	MMU_TAG_ACCESS, tmp;					\
	stxa	va, [tmp]ASI_DMMU;		/* set tag access */	\
	membar	#Sync;							\
	ldx	[tsbinfo + TSBINFO_TTE], tmp;	/* fetch locked tte */	\
	stxa	tmp, [tteidx]ASI_DTLB_ACCESS;	/* load locked tte */	\
	membar	#Sync


/*
 * In the current implementation, TSBs usually come from physically
 * contiguous chunks of memory up to 4MB in size, but 8K TSBs may be
 * allocated from 8K chunks of memory under certain conditions.  To
 * prevent aliasing in the virtual address cache when the TSB slab is
 * 8K in size we must align the reserved (TL>0) TSB virtual address to
 * have the same low-order bits as the kernel (TL=0) TSB virtual address,
 * and map 8K TSBs with an 8K TTE.  In cases where the TSB reserved VA
 * range is smaller than the assumed 4M we will patch the shift at
 * runtime; otherwise we leave it alone (which is why the RUNTIME_PATCH
 * constant doesn't appear below).
 *
 * In:
 *   tsbinfo (ro)
 *   resva: reserved VA base for this TSB
 * Out:
 *   resva: corrected VA for this TSB
 */
#define	RESV_OFFSET(tsbinfo, resva, tmp1, label)			\
	/* BEGIN CSTYLED */						\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1			;\
	brgz,pn	tmp1, 9f						;\
	  nop								;\
	ldx	[tsbinfo + TSBINFO_VADDR], tmp1				;\
	.global	label/**/_resv_offset					;\
label/**/_resv_offset:							;\
	sllx	tmp1, (64 - MMU_PAGESHIFT4M), tmp1			;\
	srlx	tmp1, (64 - MMU_PAGESHIFT4M), tmp1			;\
	or	tmp1, resva, resva					;\
9:	/* END CSTYLED */
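
/*
 * Illustrative example (hypothetical addresses): for an 8K TSB whose
 * TL=0 VA is 0x30012000, the sllx/srlx pair above isolates the offset
 * within the 4M slab (0x12000), which is then OR'ed into resva so the
 * TL>0 mapping lands on the same VAC color as the TL=0 mapping.
 */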

/*
 * Determine the pointer of the entry in the first TSB to probe given
 * the 8K TSB pointer register contents.
 *
 * In:
 *   tsbp8k = 8K TSB pointer register (ro)
 *   tmp = scratch register
 *   label = label for hot patching of utsb_vabase
 *
 * Out: tsbe_ptr = TSB entry address
 *
 * Note: This function is patched at runtime for performance reasons.
 *	 Any changes here require sfmmu_patch_utsb to be fixed up as well.
 */

#define	GET_1ST_TSBE_PTR(tsbp8k, tsbe_ptr, tmp, label)			\
	/* BEGIN CSTYLED */						\
label/**/_get_1st_tsbe_ptr:						;\
	RUNTIME_PATCH_SETX(tsbe_ptr, tmp)				;\
	/* tsbe_ptr = contents of utsb_vabase */			;\
	/* clear upper bits leaving just bits 21:0 of TSB ptr. */	;\
	sllx	tsbp8k, TSBREG_FIRTSB_SHIFT, tmp			;\
	/* finish clear */						;\
	srlx	tmp, TSBREG_FIRTSB_SHIFT, tmp				;\
	/* or-in bits 41:22 of the VA to form the real pointer. */	;\
	or	tsbe_ptr, tmp, tsbe_ptr					\
	/* END CSTYLED */

/*
 * Determine the base address of the second TSB given the 8K TSB
 * pointer register contents.
 *
 * In:
 *   tsbp8k = 8K TSB pointer register (ro)
 *   tmp = scratch register
 *   label = label for hot patching of utsb_vabase
 *
 * Out:
 *   tsbbase = TSB base address
 *
 * Note: This function is patched at runtime for performance reasons.
 *	 Any changes here require sfmmu_patch_utsb to be fixed up as well.
 */

#define	GET_2ND_TSB_BASE(tsbp8k, tsbbase, tmp, label)			\
	/* BEGIN CSTYLED */						\
label/**/_get_2nd_tsb_base:						;\
	RUNTIME_PATCH_SETX(tsbbase, tmp)				;\
	/* tsbbase = contents of utsb4m_vabase */			;\
	/* clear upper bits leaving just bits 21:xx of TSB addr. */	;\
	sllx	tsbp8k, TSBREG_SECTSB_LSHIFT, tmp			;\
	/* clear lower bits leaving just 21:13 in 8:0 */		;\
	srlx	tmp, (TSBREG_SECTSB_RSHIFT + MMU_PAGESHIFT), tmp	;\
	/* adjust TSB offset to bits 21:13 */				;\
	sllx	tmp, MMU_PAGESHIFT, tmp					;\
	or	tsbbase, tmp, tsbbase					;\
	/* END CSTYLED */

/*
 * Determine the size code of the second TSB given the 8K TSB
 * pointer register contents.
 *
 * In:
 *   tsbp8k = 8K TSB pointer register (ro)
 * Out:
 *   size = TSB size code
 */

#define	GET_2ND_TSB_SIZE(tsbp8k, size)					\
	srlx	tsbp8k, TSBREG_SECSZ_SHIFT, size;			\
	and	size, TSB_SOFTSZ_MASK, size

/*
 * Get the location in the 2nd TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings.
 *
 * In:
 *   tagacc = tag access register (clobbered)
 *   tsbp8k = contents of TSB8K pointer register (ro)
 *   tmp1, tmp2 = scratch registers
 *   label = label at which to patch in reserved TSB 4M VA range
 * Out:
 *   tsbe_ptr = pointer to the tsbe in the 2nd TSB
 */
#define	GET_2ND_TSBE_PTR(tagacc, tsbp8k, tsbe_ptr, tmp1, tmp2, label)	\
	GET_2ND_TSB_BASE(tsbp8k, tsbe_ptr, tmp2, label);		\
	/* tsbe_ptr = TSB base address, tmp2 = junk */			\
	GET_2ND_TSB_SIZE(tsbp8k, tmp1);					\
	/* tmp1 = TSB size code */					\
	GET_TSBE_POINTER(MMU_PAGESHIFT4M, tsbe_ptr, tagacc, tmp1, tmp2)

#endif /* UTSB_PHYS */


#ifdef UTSB_PHYS

/*
 * Synthesize TSB base register contents for a process.
 *
 * In:
 *   tsbinfo = TSB info pointer (ro)
 *   tsbreg, tmp1 = scratch registers
 * Out:
 *   tsbreg = value to program into TSB base register
 */

#define	MAKE_UTSBREG_PHYS(tsbinfo, tsbreg, tmp1)			\
	ldx	[tsbinfo + TSBINFO_PADDR], tsbreg;		\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1;		\
	and	tmp1, TSB_SOFTSZ_MASK, tmp1;			\
	or	tsbreg, tmp1, tsbreg

/*
 * Load TSB base register into a dedicated scratchpad register.
 * This register contains utsb_pabase in bits 63:13, and TSB size
 * code in bits 2:0.
 *
 * In:
 *   tsbreg = value to load (ro)
 *   regnum = constant or register
 *   tmp1 = scratch register
 * Out:
 *   Specified scratchpad register updated
 *
 * Note: If this is enabled on Panther, a membar #Sync is required
 *	 following an ASI store to the scratchpad registers.
 */

#define	SET_UTSBREG(regnum, tsbreg, tmp1)				\
	mov	regnum, tmp1;						\
	stxa	tsbreg, [tmp1]ASI_SCRATCHPAD	/* save tsbreg */

/*
 * Get TSB base register from the scratchpad
 *
 * In:
 *   regnum = constant or register
 *   tsbreg = scratch
 * Out:
 *   tsbreg = tsbreg from the specified scratchpad register
 */

#define	GET_UTSBREG(regnum, tsbreg)					\
	mov	regnum, tsbreg;						\
	ldxa	[tsbreg]ASI_SCRATCHPAD, tsbreg

/*
 * Determine the pointer of the entry in the first TSB to probe given
 * the 8K TSB pointer register contents.
 *
 * In:
 *   tagacc = tag access register
 *   tsbe_ptr = 8K TSB pointer register
 *   tmp1, tmp2 = scratch registers (not used)
 *
 * Out: tsbe_ptr = TSB entry address
 *
 * Note: This macro is a nop since the 8K TSB pointer register
 *	 is the entry pointer and does not need to be decoded.
 *	 It is defined to allow for code sharing with sun4v.
 */

#define	GET_1ST_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2)

/*
 * Get the location in the 2nd TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings.
 *
 * In:
 *   tagacc = tag access register (not clobbered)
 *   tsbe = 2nd TSB base register
 *   tmp1, tmp2 = scratch registers
 * Out:
 *   tsbe = pointer to the tsbe in the 2nd TSB
 */

#define	GET_2ND_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)			\
	and	tsbe, TSB_SOFTSZ_MASK, tmp2;	/* tmp2=szc */		\
	andn	tsbe, TSB_SOFTSZ_MASK, tsbe;	/* tsbbase */		\
	mov	TSB_ENTRIES(0), tmp1;	/* nentries in TSB size 0 */	\
	sllx	tmp1, tmp2, tmp1;	/* tmp1 = nentries in TSB */	\
	sub	tmp1, 1, tmp1;		/* mask = nentries - 1 */	\
	srlx	tagacc, MMU_PAGESHIFT4M, tmp2;				\
	and	tmp2, tmp1, tmp1;	/* tsbent = virtpage & mask */	\
	sllx	tmp1, TSB_ENTRY_SHIFT, tmp1;	/* entry num --> ptr */	\
	add	tsbe, tmp1, tsbe	/* add entry offset to TSB base */
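
/*
 * In rough C terms the pointer computation above is (illustrative
 * sketch only):
 *
 *	szc = tsbe & TSB_SOFTSZ_MASK;
 *	base = tsbe & ~TSB_SOFTSZ_MASK;
 *	mask = (TSB_ENTRIES(0) << szc) - 1;
 *	ptr = base + (((tagacc >> MMU_PAGESHIFT4M) & mask)
 *	    << TSB_ENTRY_SHIFT);
 */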

/*
 * Read the 2nd TSB base register.  This is kept out of GET_2ND_TSBE_PTR
 * as an optimization, since the TLB miss trap handler entries may have
 * already loaded the 2nd TSB base register by the time GET_2ND_TSBE_PTR
 * is invoked.
 *
 * Out:
 *   tsbreg = contents of the 2nd TSB base register
 */
#define	GET_2ND_TSBREG(tsbreg)						\
	GET_UTSBREG(SCRATCHPAD_UTSBREG, tsbreg);

/*
 * Load the 2nd TSB base into the dedicated scratchpad register which
 * is used as a pseudo TSB base register.
 *
 * In:
 *   tsbreg = value to load (ro)
 *   tmp1 = scratch register
 * Out:
 *   SCRATCHPAD_UTSBREG scratchpad register updated
 */
#define	LOAD_2ND_TSBREG(tsbreg, tmp1)					\
	SET_UTSBREG(SCRATCHPAD_UTSBREG, tsbreg, tmp1);

#endif /* UTSB_PHYS */


/*
 * Load TSB base register.  In the single TSB case this register
 * contains utsb_vabase, bits 21:13 of tsbinfo->tsb_va, and the
 * TSB size code in bits 2:0.  See hat_sfmmu.h for the layout in
 * the case where we have multiple TSBs per process.
 *
 * In:
 *   tsbreg = value to load (ro)
 */
#define	LOAD_TSBREG(tsbreg, tmp1, tmp2)					\
	mov	MMU_TSB, tmp1;						\
	sethi	%hi(FLUSH_ADDR), tmp2;					\
	stxa	tsbreg, [tmp1]ASI_DMMU;		/* dtsb reg */		\
	stxa	tsbreg, [tmp1]ASI_IMMU;		/* itsb reg */		\
	flush	tmp2

#ifdef UTSB_PHYS
#define	UTSB_PROBE_ASI	ASI_QUAD_LDD_PHYS
#else
#define	UTSB_PROBE_ASI	ASI_NQUAD_LD
#endif

/*
 * Probe the first TSB; if a matching entry is found, insert it
 * into the TLB and retry.
 *
 * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
 * vpg_4m = 4M virtual page number for tag matching  (in, ro)
 * label = where to branch to if this is a miss (text)
 * %asi = atomic ASI to use for the TSB access
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = location tag will be retrieved into from TSB (out)
 * g5 = location data(tte) will be retrieved into from TSB (out)
 */
#define	PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label)	/* g4/g5 clobbered */	\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */

/*
 * Same as above, except that if the TTE doesn't have the execute
 * bit set, it branches directly to exec_fault.
 */
#define	PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */

/*
 * vpg_4m = 4M virtual page number for tag matching (in)
 * tsbe_ptr = precomputed second TSB entry pointer (in)
 * label = label to use to make branch targets unique (text)
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = tag portion of TSBE (out)
 * g5 = data portion of TSBE (out)
 */
#define	PROBE_2ND_DTSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
	/* since we are looking at 2nd tsb, if it's valid, it must be 4M */ ;\
	cmp	%g4, vpg_4m						;\
	bne,pn	%xcc, label/**/1					;\
	  nop								;\
	mov	tsbe_ptr, %g1		/* trace_tsbhit wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */

#ifndef TRAPTRACE
/*
 * Same as above, with the following additions:
 * If the TTE found is not executable, branch directly
 * to exec_fault after checking for ITLB synthesis.
 * On a TSB miss, branch to the TSB miss handler.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  or	%g0, TTE4M, %g6						;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,a,pn	%icc, label/**/1					;\
	  sllx	%g6, TTE_SZ_SHFT, %g6					;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */ ;\
label/**/1:								;\
	andcc	%g5, TTE_E_SYNTH_INT, %g0				;\
	bz,pn	%icc, exec_fault					;\
	  or	%g5, %g6, %g5						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */
#else /* TRAPTRACE */
/*
 * Same as above, with the TT_TRACE and "mov tsbe_ptr, %g1" additions.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  or	%g0, TTE4M, %g6						;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,a,pn	%icc, label/**/1					;\
	  sllx	%g6, TTE_SZ_SHFT, %g6					;\
	mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */ ;\
label/**/1:								;\
	andcc	%g5, TTE_E_SYNTH_INT, %g0				;\
	bz,pn	%icc, exec_fault					;\
	  mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	or	%g5, %g6, %g5						;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */

#endif /* TRAPTRACE */
#endif /* _ASM */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_MACH_SFMMU_H */