/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#ifndef	lint

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */

#ifndef	lint

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBTAG_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc				;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
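
/*
 * For illustration only (this sketch is not part of the original code):
 * a rough C equivalent of GET_TSBE_POINTER.  The helper name and types
 * are hypothetical; TSB_ENTRIES and TSB_ENTRY_SHIFT are the real
 * constants used above.
 *
 *	struct tsbe *
 *	get_tsbe_ptr(uintptr_t tsbbase, uint64_t tagacc, int vpshift, int szc)
 *	{
 *		uint64_t nentries = (uint64_t)TSB_ENTRIES(0) << szc;
 *		uint64_t idx = (tagacc >> vpshift) & (nentries - 1);
 *
 *		return ((struct tsbe *)(tsbbase + (idx << TSB_ENTRY_SHIFT)));
 *	}
 */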

/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
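
/*
 * Sketch (not in the original source): GET_KPM_TSBE_POINTER merely
 * selects between the small-page and large-page kpm TSB descriptors
 * before reusing the common index computation, roughly:
 *
 *	if (vpshift == MMU_PAGESHIFT) {		// small (8K) kpm pages
 *		tsbp = kpmsm_tsbbase; szc = kpmsm_tsbsz;
 *	} else {				// large kpm pages
 *		tsbp = kpm_tsbbase; szc = kpm_tsbsz;
 *	}
 *	tsbp = get_tsbe_ptr(tsbp, vaddr, vpshift, szc);
 */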

/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#endif /* UTSB_PHYS */
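
/*
 * A hedged C sketch of the locking protocol above (atomic_cas_32() and
 * membar_producer() are the usual <sys/atomic.h> primitives; the taghi
 * parameter stands for the 32-bit high word of the TSBE tag):
 *
 *	int
 *	tsb_lock_entry(volatile uint32_t *taghi)
 *	{
 *		uint32_t old = *taghi;
 *
 *		if (old == TSBTAG_LOCKED)
 *			return (0);		// already locked: ignore
 *		if (atomic_cas_32(taghi, old, TSBTAG_LOCKED) != old)
 *			return (0);		// lost the race: ignore
 *		membar_producer();		// the membar #StoreStore
 *		return (1);			// lock acquired
 *	}
 */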

/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar #StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar #StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * No need to re-check the tte after updating the TSB: a TSB	;\
	 * invalidate will spin till the entry is unlocked. Note that	;\
	 * we always invalidate the hash table before we unload the TSB.;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * No need to re-check the tte after updating the TSB: a TSB	;\
	 * invalidate will spin till the entry is unlocked. Note that	;\
	 * we always invalidate the hash table before we unload the TSB.;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * No need to re-check the tte after updating the TSB: a TSB	;\
	 * invalidate will spin till the entry is unlocked. Note that	;\
	 * we always invalidate the hash table before we unload the TSB.;\
	 * Or in 4M pfn offset to TTE and set the exec_perm bit to 0	;\
	 * and exec_synth bit to 1.					;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1						;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte				;\
	or	tte, TTE_E_SYNTH_INT, tte				;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn
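
/*
 * Rough C equivalent of GET_4M_PFN_OFF (illustrative only): isolate the
 * 4M-granularity bits of the faulting va, 3 bits for a 32M page or
 * 6 bits for a 256M page, according to the tte's size field:
 *
 *	uint64_t
 *	get_4m_pfn_off(uint64_t tte, uint64_t tagaccess)
 *	{
 *		uint64_t sz = (tte >> TTE_SZ_SHFT) & TTE_SZ_BITS;
 *		uint64_t va4m = tagaccess >> MMU_PAGESHIFT4M;
 *
 *		va4m &= (sz == 0) ? 0x7 : 0x3f;	// 32M vs. 256M page
 *		return (va4m << MMU_PAGESHIFT4M);
 *	}
 */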

/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits.					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */
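
/*
 * Putting the pieces together, a hedged C sketch of the TL=0 update
 * protocol (tsb_lock_entry() is the hypothetical helper sketched after
 * TSB_LOCK_ENTRY; the tsbe field names are assumptions):
 *
 *	void
 *	tsb_update(struct tsbe *tsbep, uint64_t *tteva, uint64_t tagtarget)
 *	{
 *		uint64_t tte = *tteva;	// read before locking: may TLB miss
 *
 *		if (!tsb_lock_entry(&tsbep->tte_taghi))
 *			return;
 *		if ((int64_t)tte >= 0) {	// bit 63 clear: tte invalid
 *			tsbep->tte_taghi = TSBTAG_INVALID;	// unlock
 *			return;
 *		}
 *		tsbep->tte_data = tte;
 *		membar_producer();	// data visible before tag unlock
 *		tsbep->tte_tag = tagtarget;	// write tag and unlock
 *	}
 */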

/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 *	 about this earlier to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */
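
/*
 * Hedged C sketch of the invalidation loop above (field names again
 * hypothetical): spin while the entry is locked, bail on a tag
 * mismatch, and retry from the top if the cas loses a race:
 *
 *	void
 *	tsb_invalidate(struct tsbe *tsbep, uint64_t tag)
 *	{
 *		uint32_t hi;
 *
 *		do {
 *			while ((hi = tsbep->tte_taghi) == TSBTAG_LOCKED)
 *				continue;	// locked: wait for unlock
 *			if (tsbep->tte_tag != tag)
 *				return;		// different tag: do nothing
 *		} while (atomic_cas_32(&tsbep->tte_taghi, hi,
 *		    TSBTAG_INVALID) != hi);	// cas failed: start over
 *	}
 */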

#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */
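
/*
 * For reference (not from the source), the four patched immediates carve
 * a 64-bit constant x into the standard setx pieces:
 *
 *	hh = x >> 42;			// bits 63..42  (sethi imm22)
 *	hm = (x >> 32) & 0x3ff;		// bits 41..32  (or imm13)
 *	lm = (uint32_t)x >> 10;		// bits 31..10  (sethi imm22)
 *	lo = x & 0x3ff;			// bits  9..0   (or imm13)
 *
 * so that x == (((hh << 10) | hm) << 32) | (lm << 10) | lo.
 */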

#endif /* lint */


#if defined (lint)

/*
 * sfmmu related subroutines
 */
uint_t
sfmmu_disable_intrs()
{ return(0); }

/* ARGSUSED */
void
sfmmu_enable_intrs(uint_t pstate_save)
{}

/* ARGSUSED */
int
sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
{ return(0); }

/*
 * Use cas; if the tte has changed underneath us then reread and try again.
 * In the case of a retry, it will update sttep with the new original.
 */
/* ARGSUSED */
int
sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/*
 * Use cas; if the tte has changed underneath us then return 1, else return 0.
 */
/* ARGSUSED */
int
sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/* ARGSUSED */
void
sfmmu_copytte(tte_t *sttep, tte_t *dttep)
{}

/*ARGSUSED*/
struct tsbe *
sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
{ return(0); }

/*ARGSUSED*/
uint64_t
sfmmu_make_tsbtag(caddr_t va)
{ return(0); }

#else	/* lint */

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"

	.global	sfmmu_panic6
sfmmu_panic6:
	.asciz	"sfmmu_asm: interrupts not disabled"

	.global	sfmmu_panic7
sfmmu_panic7:
	.asciz	"sfmmu_asm: kernel as"

	.global	sfmmu_panic8
sfmmu_panic8:
	.asciz	"sfmmu_asm: gnum is zero"

	.global	sfmmu_panic9
sfmmu_panic9:
	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"

	.global	sfmmu_panic10
sfmmu_panic10:
	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"

	.global	sfmmu_panic11
sfmmu_panic11:
	.asciz	"sfmmu_asm: ktsb_phys must not be 0 on a sun4v platform"

	ENTRY(sfmmu_disable_intrs)
	rdpr	%pstate, %o0
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
#endif /* DEBUG */
	retl
	  wrpr	%o0, PSTATE_IE, %pstate
	SET_SIZE(sfmmu_disable_intrs)

	ENTRY(sfmmu_enable_intrs)
	retl
	  wrpr	%g0, %o0, %pstate
	SET_SIZE(sfmmu_enable_intrs)

/*
 * This routine is called both by resume() and sfmmu_get_ctx() to
 * allocate a new context for the process on an MMU.
 * If allocflag == 1, then alloc ctx when HAT mmu cnum == INVALID.
 * If allocflag == 0, then do not alloc ctx if HAT mmu cnum == INVALID, which
 * is the case when sfmmu_alloc_ctx is called from resume().
 *
 * The caller must disable interrupts before entering this routine.
 * To reduce ctx switch overhead, the code contains both 'fast path' and
 * 'slow path' code. The fast path code covers the common case where only
 * a quick check is needed and the real ctx allocation is not required.
 * It can be done without holding the per-process (PP) lock.
 * The 'slow path' code must be protected by the PP lock and performs ctx
 * allocation.
 * Hardware context register and HAT mmu cnum are updated accordingly.
 *
 * %o0 - sfmmup
 * %o1 - allocflag
 * %o2 - CPU
 * %o3 - sfmmu private/shared flag
 *
 * ret - 0: no ctx is allocated
 *       1: a ctx is allocated
 */
	ENTRY_NP(sfmmu_alloc_ctx)

#ifdef DEBUG
	sethi	%hi(ksfmmup), %g1
	ldx	[%g1 + %lo(ksfmmup)], %g1
	cmp	%g1, %o0
	bne,pt	%xcc, 0f
	  nop

	sethi	%hi(panicstr), %g1		! if kernel as, panic
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 7f
	  nop

	sethi	%hi(sfmmu_panic7), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic7), %o0

7:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0

0:
	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
#endif /* DEBUG */

	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1

	! load global mmu_ctxp info
	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr

#ifdef sun4v
	/* During suspend on sun4v, context domains can be temporarily removed */
	brz,a,pn	%o3, 0f
	  nop
#endif

	lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index

	! load global mmu_ctxp gnum
	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum

#ifdef DEBUG
	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
	bne,pt	%xcc, 3f
	  nop

	sethi	%hi(panicstr), %g1	! test if panicstr is already set
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 1f
	  nop

	sethi	%hi(sfmmu_panic8), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic8), %o0
1:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0
3:
#endif

	! load HAT sfmmu_ctxs[mmuid] gnum, cnum

	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS

	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 *
	 * Fast path code, do a quick check.
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 8f		! allocflag == 0, skip ctx allocation, bail
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 2f
	  nop
#ifdef sun4v
0:
	set	INVALID_CONTEXT, %o1
	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  mov	%g0, %g4		! %g4 = ret = 0
#endif
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 8f			! gnum unchanged, go to done
	  mov	%g6, %o1

2:
	/*
	 * Grab per process (PP) sfmmu_ctx_lock spinlock,
	 * followed by the 'slow path' code.
	 */
	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
3:
	brz	%g3, 5f
	  nop
4:
	brnz,a,pt	%g3, 4b				! spin if lock is 1
	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
	ba	%xcc, 3b				! retry the lock
	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = PP lock

5:
	membar	#LoadLoad
	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 6f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 2f			! gnum unchanged, go to done
	  mov	%g6, %o1

	ba,pt	%icc, 6f
	  nop
2:
	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
6:
	/*
	 * We get here if we do not have a valid context, or
	 * the HAT gnum does not match global gnum. We hold
	 * sfmmu_ctx_lock spinlock. Allocate that context.
	 *
	 * %o3 = mmu_ctxp
	 */
	add	%o3, MMU_CTX_CNUM, %g3
	ld	[%o3 + MMU_CTX_NCTXS], %g4

	/*
	 * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
	 * %g3 = mmu cnum address
	 * %g4 = mmu nctxs
	 *
	 * %o0 = sfmmup
	 * %o1 = mmu current cnum value (used as new cnum)
	 * %o4 = mmu gnum
	 *
	 * %o5 = scratch
	 */
	ld	[%g3], %o1
0:
	cmp	%o1, %g4
	bl,a,pt %icc, 1f
	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1

	/*
	 * cnum has reached max; bail so wraparound can be performed later.
	 */
	set	INVALID_CONTEXT, %o1
	mov	%g0, %g4		! %g4 = ret = 0

	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
1:
	! %g3 = addr of mmu_ctxp->cnum
	! %o5 = mmu_ctxp->cnum + 1
	cas	[%g3], %o1, %o5
	cmp	%o1, %o5
	bne,a,pn %xcc, 0b	! cas failed
	  ld	[%g3], %o1

#ifdef DEBUG
	set	MAX_SFMMU_CTX_VAL, %o5
	cmp	%o1, %o5
	ble,pt	%icc, 2f
	  nop

	sethi	%hi(sfmmu_panic9), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic9), %o0
2:
#endif
	! update hat gnum and cnum
	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
	or	%o4, %o1, %o4
	stx	%o4, [%g2 + SFMMU_CTXS]

	membar	#LoadStore|#StoreStore
	clrb	[%o0 + SFMMU_CTX_LOCK]

	mov	1, %g4			! %g4 = ret = 1
8:
	/*
	 * program the secondary context register
	 *
	 * %o1 = cnum
	 * %g1 = sfmmu private/shared flag (0:private, 1:shared)
	 */

	/*
	 * When we come here and the context is invalid, we want to set both
	 * the private and shared ctx regs to INVALID. In order to
	 * do so, we set the sfmmu priv/shared flag to 'private' regardless,
	 * so that the private ctx reg will be set to invalid.
	 * Note that on sun4v values written to the private context register
	 * are automatically written to the corresponding shared context
	 * register as well. On sun4u SET_SECCTX() will invalidate the shared
	 * context register when it sets a private secondary context register.
	 */

	cmp	%o1, INVALID_CONTEXT
	be,a,pn	%icc, 9f
	  clr	%g1
9:

#ifdef	sun4u
	ldub	[%o0 + SFMMU_CEXT], %o2
	sll	%o2, CTXREG_EXT_SHIFT, %o2
	or	%o1, %o2, %o1
#endif /* sun4u */

	SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)

	retl
	  mov	%g4, %o0			! %o0 = ret

	SET_SIZE(sfmmu_alloc_ctx)
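
/*
 * A condensed, assumption-heavy C rendering of sfmmu_alloc_ctx (it omits
 * the DEBUG checks and the programming of the secondary context
 * register, and the field names are approximations):
 *
 *	int
 *	sfmmu_alloc_ctx(sfmmu_t *p, int allocflag, mmu_ctx_t *m, int mmuid)
 *	{
 *		// fast path: cnum valid and generation unchanged
 *		if (p->sfmmu_ctxs[mmuid].cnum != INVALID_CONTEXT &&
 *		    p->sfmmu_ctxs[mmuid].gnum == m->gnum)
 *			return (1);
 *		if (allocflag == 0)	// called from resume(): no alloc
 *			return (0);
 *		lock(&p->sfmmu_ctx_lock);	// the ldstub spin loop
 *		// recheck under the lock, then take the next cnum with
 *		// cas; bail with INVALID_CONTEXT once cnum reaches nctxs
 *		// so that a wraparound can be performed later
 *		unlock(&p->sfmmu_ctx_lock);
 *	}
 */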

	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
2:
	ldx	[%o1], %g2			/* modified */
	cmp	%g2, %g3			/* is modified = current? */
	be,a,pt	%xcc, 1f			/* yes, don't write */
	stx	%g3, [%o0]			/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f			/* cas succeeded - return */
	  nop
	ldx	[%o2], %g3			/* new current */
	stx	%g3, [%o0]			/* save as new original */
	ba,pt	%xcc, 2b
	  mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)

	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2			/* modified */
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
	cmp	%g3, %g2			/* is modified = current? */
	be,a,pn %xcc, 1f			/* yes, don't write */
	mov	0, %o1				/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1			/* cas failed. */
	move	%xcc, 1, %o1			/* cas succeeded. */
1:
	stx	%g2, [%o0]			/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)
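
/*
 * In (hypothetical) C terms, sfmmu_modifytte_try above is roughly the
 * following; sfmmu_modifytte is the same cas wrapped in a retry loop
 * (tte_t is treated here as its raw 64-bit value, tte.ll):
 *
 *	int
 *	sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
 *	{
 *		uint64_t mod = stmodttep->ll;
 *		uint64_t orig = sttep->ll;
 *		uint64_t old;
 *
 *		if (dttep->ll == mod) {	// nothing to change
 *			sttep->ll = mod;
 *			return (0);	// as if cas failed
 *		}
 *		old = atomic_cas_64(&dttep->ll, orig, mod);
 *		sttep->ll = old;	// report the "current" value
 *		return (old == orig ? 1 : -1);
 *	}
 */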

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

#endif /* lint */

/*
 * Other sfmmu primitives
 */


#if defined (lint)
void
sfmmu_patch_ktsb(void)
{
}

void
sfmmu_kpm_patch_tlbm(void)
{
}

void
sfmmu_kpm_patch_tsbm(void)
{
}

void
sfmmu_patch_shctx(void)
{
}

/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
{
}

/* ARGSUSED */
void
sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
{
}

/* ARGSUSED */
void
sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
{
}

/* ARGSUSED */
void
sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
{
}

#else /* lint */

#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 32:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as follows:
	 *
	 * sethi   %hh(RUNTIME_PATCH), tmp
	 * sethi   %lm(RUNTIME_PATCH), dest
	 * or      tmp, %hm(RUNTIME_PATCH), tmp
	 * or      dest, %lo(RUNTIME_PATCH), dest
	 * sllx    tmp, 32, tmp
	 * nop
	 * or      tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)
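
/*
 * Illustrative C for the patching above (assumes the immediate fields
 * start out as RUNTIME_PATCH, i.e. zero; doflush() is a stand-in for
 * flushing each patched instruction):
 *
 *	void
 *	fixup_setx(uint32_t *instrs, uint64_t x)
 *	{
 *		instrs[0] |= (uint32_t)(x >> 42);	// sethi %hh, imm22
 *		instrs[1] |= (uint32_t)(x << 32 >> 42);	// sethi %lm, imm22
 *		instrs[2] |= (uint32_t)(x >> 32) & 0x3ff; // or %hm, imm13
 *		instrs[3] |= (uint32_t)x & 0x3ff;	// or %lo, imm13
 *		doflush(instrs);
 *	}
 */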

	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to setup, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	  srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	  or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	  add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)
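
/*
 * The instruction match above decodes the word as follows (sketch):
 *
 *	int
 *	is_ldda(uint32_t i)
 *	{
 *		// op (bits 31:30) must be 3 and op3 (bits 24:19) 0x13
 *		return ((i >> 30) == 3 && ((i >> 19) & 0x3f) == 0x13);
 *	}
 *
 * i.e. only ldda instructions get their imm_asi field or'ed in.
 */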

	/*
	 * Patch immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	mov	ASI_QUAD_LDD_PHYS, %o3		! set QUAD_LDD_PHYS by default

#ifdef sun4v

	/*
	 * Check ktsb_phys. It must be non-zero for sun4v; panic if not.
	 */

	brnz,pt %o0, do_patch
	nop

	sethi	%hi(sfmmu_panic11), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic11), %o0
do_patch:

#else /* sun4v */
	/*
	 * Some non-sun4v platforms deploy virtual ktsb (ktsb_phys==0).
	 * Note that ASI_NQUAD_LD is not defined/used for sun4v
	 */
	movrz	%o0, ASI_NQUAD_LD, %o3

#endif /* sun4v */

	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	  or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	  or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	  or	%o0, %lo(iktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
	  or	%o0, %lo(iktsb4m), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)


	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	  nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(iktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb4m), %o0

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif /* !sun4v */

	ldx	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(iktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(iktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)

	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)

	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	flush	%o0
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)

	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	lduw	[%o1], %o3
	subcc	%o4, %o3, %o4
	bz,pt	%icc, 1f
	  /* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)

	ENTRY_NP(sfmmu_patch_shctx)
#ifdef sun4u
	retl
	  nop
#else /* sun4u */
	set	sfmmu_shctx_cpu_mondo_patch, %o0
	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp       %g5
	st	%o1, [%o0]
	flush	%o0
	MAKE_NOP_INSTR(%o1)
	add	%o0, I_SIZE, %o0	! next instr
	st	%o1, [%o0]
	flush	%o0

	set	sfmmu_shctx_user_rtt_patch, %o0
	st	%o1, [%o0]		! nop 1st instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 2nd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 3rd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 4th instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 5th instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 6th instruction
	retl
	flush	%o0
#endif /* sun4u */
	SET_SIZE(sfmmu_patch_shctx)

	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, locked_tsb_l8)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_load_tsbe)

	/*
	 * Flush TSB of a given entry if the tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_unload_tsbe)

	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, locked_tsb_l9)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	  membar #StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_load_tsb)

	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg. Locking is required since
	 * kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	  membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_unload_tsb)

#endif /* lint */


#if defined (lint)

/*ARGSUSED*/
pfn_t
sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
{ return(0); }

#else /* lint */

	ENTRY_NP(sfmmu_ttetopfn)
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0
	SET_SIZE(sfmmu_ttetopfn)

#endif /* !lint */

/*
 * These macros are used to update global sfmmu hme hash statistics
 * in perf critical paths. They are only enabled in debug kernels or
 * if SFMMU_STAT_GATHER is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_LINKS, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]


#else /* DEBUG || SFMMU_STAT_GATHER */

#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#endif  /* DEBUG || SFMMU_STAT_GATHER */

/*
 * This macro is used to update global sfmmu kstats in non-perf
 * critical areas, so it is enabled all the time.
 */
#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
	sethi	%hi(sfmmu_global_stat), tmp1				;\
	add	tmp1, statname, tmp1					;\
	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
	inc	tmp2							;\
	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]

/*
 * These macros are used to update per cpu stats in non-perf
 * critical areas, so they are enabled all the time.
 */
#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
	ld	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	st	tmp1, [tsbarea + stat]

#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
	lduh	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	stuh	tmp1, [tsbarea + stat]

#if defined(KPM_TLBMISS_STATS_GATHER)
	/*
	 * Count kpm dtlb misses separately to allow a different
	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
	 */
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
	brgez	tagacc, label	/* KPM VA? */				;\
	nop								;\
	CPU_INDEX(tmp1, tsbma)						;\
	sethi	%hi(kpmtsbm_area), tsbma				;\
	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
	/* VA range check */						;\
	ldx	[tsbma + KPMTSBM_VBASE], val				;\
	cmp	tagacc, val						;\
	blu,pn	%xcc, label						;\
	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
	cmp	tagacc, tmp1						;\
	bgeu,pn	%xcc, label						;\
	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
	inc	val							;\
	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
label:
#else
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
#endif	/* KPM_TLBMISS_STATS_GATHER */

#if defined (lint)
/*
 * The following routines are jumped to from the mmu trap handlers to do
 * the setting up to call systrap.  They are separate routines instead of
 * being part of the handlers because the handlers would exceed 32
 * instructions, and since this is part of the slow path the jump
 * cost is irrelevant.
 */
void
sfmmu_pagefault(void)
{
}

void
sfmmu_mmu_trap(void)
{
}

void
sfmmu_window_trap(void)
{
}

void
sfmmu_kpm_exception(void)
{
}

#else /* lint */

#ifdef	PTL1_PANIC_DEBUG
	.seg	".data"
	.global	test_ptl1_panic
test_ptl1_panic:
	.word	0
	.align	8

	.seg	".text"
	.align	4
#endif	/* PTL1_PANIC_DEBUG */

	ENTRY_NP(sfmmu_pagefault)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */

#ifdef  PTL1_PANIC_DEBUG
	/* check if we want to test the tl1 panic */
	sethi	%hi(test_ptl1_panic), %g4
	ld	[%g4 + %lo(test_ptl1_panic)], %g1
	st	%g0, [%g4 + %lo(test_ptl1_panic)]
	cmp	%g1, %g0
	bne,a,pn %icc, ptl1_panic
	  or	%g0, PTL1_BAD_DEBUG, %g1
#endif	/* PTL1_PANIC_DEBUG */
1:
	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
	/*
	 * g2 = tag access reg
	 * g3.l = type
	 * g3.h = 0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1
2:
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	SET_SIZE(sfmmu_pagefault)

	ENTRY_NP(sfmmu_mmu_trap)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1:
	/*
	 * g2 = tag access reg
	 * g3 = type
	 */
	sethi	%hi(sfmmu_tsbmiss_exception), %g1
	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_mmu_trap)

	ENTRY_NP(sfmmu_suspend_tl)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3
	movne	%icc, T_DATA_PROT, %g3
1:
	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
	ba,pt	%xcc, sys_trap
	  mov	PIL_15, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_suspend_tl)

	/*
	 * No %g registers in use at this point.
	 */
	ENTRY_NP(sfmmu_window_trap)
	rdpr	%tpc, %g1
#ifdef sun4v
#ifdef DEBUG
	/* We assume previous %gl was 1 */
	rdpr	%tstate, %g4
	srlx	%g4, TSTATE_GL_SHIFT, %g4
	and	%g4, TSTATE_GL_MASK, %g4
	cmp	%g4, 1
	bne,a,pn %icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
#endif /* DEBUG */
	/* user miss at tl>1. better be the window handler or user_rtt */
	/* in user_rtt? */
	set	rtt_fill_start, %g4
	cmp	%g1, %g4
	blu,pn %xcc, 6f
	 .empty
	set	rtt_fill_end, %g4
	cmp	%g1, %g4
	bgeu,pn %xcc, 6f
	 nop
	set	fault_rtt_fn1, %g1
	wrpr	%g0, %g1, %tnpc
	ba,a	7f
6:
	! must save this trap level before descending trap stack
	! no need to save %tnpc, either overwritten or discarded
	! already got it: rdpr	%tpc, %g1
	rdpr	%tstate, %g6
	rdpr	%tt, %g7
	! trap level saved, go get underlying trap type
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
	! restore saved trap level
	wrpr	%g1, %tpc
	wrpr	%g6, %tstate
	wrpr	%g7, %tt
#else /* sun4v */
	/* user miss at tl>1. better be the window handler */
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
#endif /* sun4v */
	and	%g2, WTRAP_TTMASK, %g4
	cmp	%g4, WTRAP_TYPE
	bne,pn	%xcc, 1f
	 nop
	/* tpc should be in the trap table */
	set	trap_table, %g4
	cmp	%g1, %g4
	blt,pn %xcc, 1f
	 .empty
	set	etrap_table, %g4
	cmp	%g1, %g4
	bge,pn %xcc, 1f
	 .empty
	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
	add	%g1, WTRAP_FAULTOFF, %g1
	wrpr	%g0, %g1, %tnpc
7:
	/*
	 * Some wbuf handlers will call systrap to resolve the fault;
	 * we pass the trap type so they can figure out the correct
	 * parameters.
	 * g5 = trap type, g6 = tag access reg
	 */

	/*
	 * only use g5, g6, g7 registers after we have switched to alternate
	 * globals.
	 */
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
	rdpr	%tt, %g7
	cmp	%g7, FAST_IMMU_MISS_TT
	be,a,pn	%icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
	cmp	%g7, T_INSTR_MMU_MISS
	be,a,pn	%icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
	mov	T_DATA_PROT, %g5
	cmp	%g7, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g5
	cmp	%g7, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g5
	! XXXQ AGS re-check out this one
	done
1:
	CPU_PADDR(%g1, %g4)
	add	%g1, CPU_TL1_HDLR, %g1
	lda	[%g1]ASI_MEM, %g4
	brnz,a,pt %g4, sfmmu_mmu_trap
	  sta	%g0, [%g1]ASI_MEM
	ba,pt	%icc, ptl1_panic
	  mov	PTL1_BAD_TRAP, %g1
	SET_SIZE(sfmmu_window_trap)
1921
1922	ENTRY_NP(sfmmu_kpm_exception)
1923	/*
1924	 * We have accessed an unmapped segkpm address or a legal segkpm
1925	 * address which is involved in VAC alias conflict prevention.
1926	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
1927	 * set. If it is, we will instead note that a fault has occurred
1928	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
1929	 * a "retry"). This will step over the faulting instruction.
1930	 * Note that this means that a legal segkpm address involved in
1931	 * VAC alias conflict prevention (a rare case to begin with)
1932	 * cannot be used in DTrace.
1933	 */
1934	CPU_INDEX(%g1, %g2)
1935	set	cpu_core, %g2
1936	sllx	%g1, CPU_CORE_SHIFT, %g1
1937	add	%g1, %g2, %g1
1938	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
1939	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
1940	bz	0f
1941	or	%g2, CPU_DTRACE_BADADDR, %g2
1942	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
1943	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
1944	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
1945	done
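	/*
	 * In C terms, the check above amounts to the following sketch
	 * (fault_va stands for the GET_MMU_D_ADDR result; cpu_core and
	 * the cpuc_dtrace_* fields are the real per-CPU DTrace state):
	 *
	 *	cpu_core_t *cp = &cpu_core[CPU->cpu_id];
	 *	if (cp->cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) {
	 *		cp->cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *		cp->cpuc_dtrace_illval = fault_va;
	 *		return;		// "done" steps over the instruction
	 *	}
	 */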
19460:
1947	TSTAT_CHECK_TL1(1f, %g1, %g2)
19481:
1949	SET_GL_REG(1)
1950	USE_ALTERNATE_GLOBALS(%g5)
1951	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
1952	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1953	/*
1954	 * g2=tagacc g3.l=type g3.h=0
1955	 */
1956	sethi	%hi(trap), %g1
1957	or	%g1, %lo(trap), %g1
1958	ba,pt	%xcc, sys_trap
1959	  mov	-1, %g4
1960	SET_SIZE(sfmmu_kpm_exception)
1961
1962#endif /* lint */
1963
1964#if defined (lint)
1965
1966void
1967sfmmu_tsb_miss(void)
1968{
1969}
1970
1971void
1972sfmmu_kpm_dtsb_miss(void)
1973{
1974}
1975
1976void
1977sfmmu_kpm_dtsb_miss_small(void)
1978{
1979}
1980
1981#else /* lint */
1982
1983#if (IMAP_SEG != 0)
1984#error - ism_map->ism_seg offset is not zero
1985#endif
1986
1987/*
1988 * Checks whether the vaddr passed in via tagacc lies within an ISM
1989 * segment for this process.  If it does, the ism mapping (ism_seg)
1990 * is returned in param "ismseg" and we branch to label "ismhit".
1991 * If this is not an ISM process, or not an ISM tlb miss, it falls
1992 * through.
1995 *
1996 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
1997 * so that any other threads of this process will not try and walk the ism
1998 * maps while they are being changed.
1999 *
2000 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
2001 *       will make sure of that. This means we can terminate our search on
2002 *       the first zero mapping we find.
2003 *
2004 * Parameters:
2005 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
2006 * tsbmiss	= address of tsb miss area (in)
2007 * ismseg	= contents of ism_seg for this ism map (out)
2008 * ismhat	= physical address of imap_ismhat for this ism map (out)
2009 * tmp1		= scratch reg (CLOBBERED)
2010 * tmp2		= scratch reg (CLOBBERED)
2011 * tmp3		= scratch reg (CLOBBERED)
2012 * label:    temporary label prefix (expanded into local labels)
2013 * ismhit:   label to jump to on an ism tlb miss
2015 */
2016#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
2017	label, ismhit)							\
2018	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
2019	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
2020	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
2021label/**/1:								;\
2022	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
2023	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
2024label/**/2:								;\
2025	brz,pt  ismseg, label/**/3		/* no mapping */	;\
2026	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
2027	lduba	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
2028	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
2029	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
2030	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
2031	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
2032	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
2033	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
2034	cmp	tmp2, tmp1		 	/* check offset < size */	;\
2035	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
2036	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
2037									;\
2038	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
2039	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
2040	cmp	ismhat, tmp1						;\
2041	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
2042	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
2043									;\
2044	add	tmp3, IBLK_NEXTPA, tmp1					;\
2045	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
2046	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
2047	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
2048label/**/3:
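/*
 * A C sketch of the walk above, with pa_read64()/pa_read32()/pa_read8()
 * standing in for the ldxa/lda/lduba [..]ASI_MEM physical loads (the
 * helpers are illustrative, not real kernel functions):
 *
 *	for (blkpa = tsbmiss->ismblkpa; (int64_t)blkpa >= 0;
 *	    blkpa = pa_read64(blkpa + IBLK_NEXTPA)) {
 *		mappa = blkpa + IBLK_MAPS;
 *		for (i = 0; i < ISM_MAP_SLOTS; i++, mappa += ISM_MAP_SZ) {
 *			seg = pa_read64(mappa + IMAP_SEG);
 *			if (seg == 0)
 *				return;		// no holes, so we are done
 *			shift = pa_read8(mappa + IMAP_VB_SHIFT);
 *			off = (va >> shift) - (seg >> shift);
 *			if (off < (seg & pa_read32(mappa + IMAP_SZ_MASK)))
 *				goto ismhit;
 *		}
 *	}
 */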
2049
2050/*
2051 * Returns the hme hash bucket (hmebp) given the vaddr, and the hatid
2052 * It also returns the virtual pg for vaddr (i.e. vaddr >> hmeshift)
2053 * Parameters:
2054 * tagacc = reg containing virtual address
2055 * hatid = reg containing sfmmu pointer
2056 * hmeshift = constant/register to shift vaddr to obtain vapg
2057 * hmebp = register where bucket pointer will be stored
2058 * vapg = register where virtual page will be stored
2059 * tmp1, tmp2 = tmp registers
2060 */
2061
2062
2063#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
2064	vapg, label, tmp1, tmp2)					\
2065	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
2066	brnz,a,pt tmp1, label/**/1					;\
2067	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
2068	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
2069	ba,pt	%xcc, label/**/2					;\
2070	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
2071label/**/1:								;\
2072	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
2073label/**/2:								;\
2074	srlx	tagacc, hmeshift, vapg					;\
2075	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
2076	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
2077	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
2078	add	hmebp, tmp1, hmebp
2079
2080/*
2081 * hashtag includes bspage + hashno (64 bits).
2082 */
2083
2084#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
2085	sllx	vapg, hmeshift, vapg					;\
2086	mov	hashno, hblktag						;\
2087	sllx	hblktag, HTAG_REHASH_SHIFT, hblktag			;\
2088	or	vapg, hblktag, hblktag
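/*
 * Together the two macros above mirror the C-level hash lookup: the
 * bucket index is the virtual page (at this rehash level) xor'ed with
 * the hatid and masked by the hash size, and the tag packs the base
 * page address with the rehash number.  As a sketch (hashsz is the
 * value loaded from the tsbmiss area, used here as a mask):
 *
 *	vapg    = tagacc >> hmeshift;
 *	hmebp   = &hme_hash[(vapg ^ (uintptr_t)hatid) & hashsz];
 *	hblktag = (vapg << hmeshift) | (hashno << HTAG_REHASH_SHIFT);
 */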
2089
2090/*
2091 * Function to traverse hmeblk hash link list and find corresponding match.
2092 * The search is done using physical pointers. It returns the physical address
2093 * pointer to the hmeblk that matches with the tag provided.
2094 * Parameters:
2095 * hmebp	= register that points to hme hash bucket, also used as
2096 *		  tmp reg (clobbered)
2097 * hmeblktag	= register with hmeblk tag match
2098 * hatid	= register with hatid
2099 * hmeblkpa	= register where physical ptr will be stored
2100 * tmp1		= tmp reg
2101 * label: temporary label
2102 */
2103
2104#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, tsbarea, 	\
2105	tmp1, label)							\
2106	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
2107	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2108	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2109label/**/1:								;\
2110	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2111	be,pn   %xcc, label/**/2					;\
2112	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2113	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
2114	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2115	add	hmebp, CLONGSIZE, hmebp					;\
2116	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
2117	xor	tmp1, hmeblktag, tmp1					;\
2118	xor	hmebp, hatid, hmebp					;\
2119	or	hmebp, tmp1, hmebp					;\
2120	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
2121	  add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
2122	ba,pt	%xcc, label/**/1					;\
2123	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
2124label/**/2:
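/*
 * The loop above walks the bucket chain through physical space and
 * compares both 64-bit tag words in one pass; the xor/xor/or sequence
 * is a branch-free double compare.  Sketch, with pa_read64() standing
 * in for the ldxa [..]ASI_MEM loads:
 *
 *	pa = pa_read64(hmebp + HMEBUCK_NEXTPA);
 *	while (pa != HMEBLK_ENDPA) {
 *		if (((pa_read64(pa + HMEBLK_TAG) ^ hmeblktag) |
 *		    (pa_read64(pa + HMEBLK_TAG + CLONGSIZE) ^
 *		    (uintptr_t)hatid)) == 0)
 *			break;			// hit
 *		pa = pa_read64(pa + HMEBLK_NEXTPA);
 *	}
 */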
2125
2126/*
2127 * Function to traverse hmeblk hash link list and find corresponding match.
2128 * The search is done using physical pointers. It returns the physical address
2129 * pointer to the hmeblk that matches with the tag
2130 * provided.
2131 * Parameters:
2132 * hmeblktag	= register with hmeblk tag match (rid field is 0)
2133 * hatid	= register with hatid (pointer to SRD)
2134 * hmeblkpa	= register where physical ptr will be stored
2135 * tmp1		= tmp reg
2136 * tmp2		= tmp reg
2137 * label: temporary label
2138 */
2139
2140#define	HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, tsbarea,	\
2141	tmp1, tmp2, label)			 			\
2142label/**/1:								;\
2143	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2144	be,pn   %xcc, label/**/4					;\
2145	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			;\
2146	add	hmeblkpa, HMEBLK_TAG, tmp2				;\
2147	ldxa	[tmp2]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2148	add	tmp2, CLONGSIZE, tmp2					;\
2149	ldxa	[tmp2]ASI_MEM, tmp2 	/* read 2nd part of tag */	;\
2150	xor	tmp1, hmeblktag, tmp1					;\
2151	xor	tmp2, hatid, tmp2					;\
2152	brz,pn	tmp2, label/**/3	/* branch on hit */		;\
2153	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2154label/**/2:								;\
2155	ba,pt	%xcc, label/**/1					;\
2156	  ldxa	[tmp2]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */		;\
2157label/**/3:								;\
2158	cmp	tmp1, SFMMU_MAX_HME_REGIONS				;\
2159	bgeu,pt	%xcc, label/**/2					;\
2160	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2161	and	tmp1, BT_ULMASK, tmp2					;\
2162	srlx	tmp1, BT_ULSHIFT, tmp1					;\
2163	sllx	tmp1, CLONGSHIFT, tmp1					;\
2164	add	tsbarea, tmp1, tmp1					;\
2165	ldx	[tmp1 + TSBMISS_SHMERMAP], tmp1				;\
2166	srlx	tmp1, tmp2, tmp1					;\
2167	btst	0x1, tmp1						;\
2168	bz,pn	%xcc, label/**/2					;\
2169	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2170label/**/4:
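/*
 * The shared-hmeblk variant differs after a tag match: the search tag
 * carries rid == 0, so the surviving xor bits are the hmeblk's region
 * id, which must be a valid rid that is set in this tsbmiss area's
 * shared-region bitmap for the match to count.  Sketch:
 *
 *	rid = tag_word0 ^ hmeblktag;	// only the rid bits may differ
 *	if (rid < SFMMU_MAX_HME_REGIONS &&
 *	    (tsbmiss->shmermap[rid >> BT_ULSHIFT] >>
 *	    (rid & BT_ULMASK)) & 0x1)
 *		goto found;
 *	// otherwise keep walking the chain
 */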
2171
2172#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2173#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2174#endif
2175
2176/*
2177 * HMEBLK_TO_HMENT is a macro that, given an hmeblk and a vaddr, returns
2178 * the offset for the corresponding hment.
2179 * Parameters:
2180 * In:
2181 *	vaddr = register with virtual address
2182 *	hmeblkpa = physical pointer to hme_blk
2183 * Out:
2184 *	hmentoff = register where hment offset will be stored
2185 *	hmemisc = hblk_misc
2186 * Scratch:
2187 *	tmp1
2188 */
2189#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
2190	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2191	lda	[hmentoff]ASI_MEM, hmemisc 				;\
2192	andcc	hmemisc, HBLK_SZMASK, %g0				;\
2193	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2194	  or	%g0, HMEBLK_HME1, hmentoff				;\
2195	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2196	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2197	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2198	add	tmp1, HMEBLK_HME1, hmentoff				;\
2199label1:
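/*
 * Sketch of the index math above: an 8K hmeblk holds NHMENTS hments,
 * one per 8K page in the block, while a larger page size uses the
 * single hment at HMEBLK_HME1:
 *
 *	if (hmemisc & HBLK_SZMASK)	// size != TTE8K
 *		hmentoff = HMEBLK_HME1;
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    (((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) <<
 *		    SFHME_SHIFT);
 */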
2200
2201/*
2202 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2203 *
2204 * tagacc	= (pseudo-)tag access register (in)
2205 * hatid	= sfmmu pointer for TSB miss (in)
2206 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2207 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2208 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2209 * hmemisc	= hblk_misc if TTE is found (out), otherwise clobbered
2210 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2211 *		  for this page size.
2212 * hashno	= constant/register hash number
2213 * tmp		= temp value - clobbered
2214 * label	= temporary label for branching within macro.
2215 * foundlabel	= label to jump to when tte is found.
2216 * suspendlabel= label to jump to when tte is suspended.
2217 * exitlabel	= label to jump to when tte is not found.
2218 *
2219 */
2220#define GET_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, hmeshift, \
2221		 hashno, tmp, label, foundlabel, suspendlabel, exitlabel) \
2222									;\
2223	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2224	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2225	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2226		hmeblkpa, label/**/5, hmemisc, tmp)			;\
2227									;\
2228	/*								;\
2229	 * tagacc = tagacc						;\
2230	 * hatid = hatid						;\
2231	 * tsbarea = tsbarea						;\
2232	 * tte   = hmebp (hme bucket pointer)				;\
2233	 * hmeblkpa  = vapg  (virtual page)				;\
2234	 * hmemisc, tmp = scratch					;\
2235	 */								;\
2236	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2237	or	hmemisc, SFMMU_INVALID_SHMERID, hmemisc			;\
2238									;\
2239	/*								;\
2240	 * tagacc = tagacc						;\
2241	 * hatid = hatid						;\
2242	 * tte   = hmebp						;\
2243	 * hmeblkpa  = CLOBBERED					;\
2244	 * hmemisc  = htag_bspage+hashno+invalid_rid			;\
2245	 * tmp  = scratch						;\
2246	 */								;\
2247	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2248	HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, 	 		\
2249		tsbarea, tagacc, label/**/1)				;\
2250	/*								;\
2251	 * tagacc = CLOBBERED						;\
2252	 * tte = CLOBBERED						;\
2253	 * hmeblkpa = hmeblkpa						;\
2254	 * tmp = scratch						;\
2255	 */								;\
2256	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2257	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
2258	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2259	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2260	  nop								;\
2261label/**/4:								;\
2262	/*								;\
2263	 * We have found the hmeblk containing the hment.		;\
2264	 * Now we calculate the corresponding tte.			;\
2265	 *								;\
2266	 * tagacc = tagacc						;\
2267	 * hatid = hatid						;\
2268	 * tte   = clobbered						;\
2269	 * hmeblkpa  = hmeblkpa						;\
2270	 * hmemisc  = hblktag						;\
2271	 * tmp = scratch						;\
2272	 */								;\
2273	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2274		label/**/2)						;\
2275									;\
2276	/*								;\
2277	 * tagacc = tagacc						;\
2278	 * hatid = hmentoff						;\
2279	 * tte   = clobbered						;\
2280	 * hmeblkpa  = hmeblkpa						;\
2281	 * hmemisc  = hblk_misc						;\
2282	 * tmp = scratch						;\
2283	 */								;\
2284									;\
2285	add	hatid, SFHME_TTE, hatid					;\
2286	add	hmeblkpa, hatid, hmeblkpa				;\
2287	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2288	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2289	set	TTE_SUSPEND, hatid					;\
2290	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2291	btst	tte, hatid						;\
2292	bz,pt	%xcc, foundlabel					;\
2293	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2294									;\
2295	/*								;\
2296	 * Mapping is suspended, so goto suspend label.			;\
2297	 */								;\
2298	ba,pt	%xcc, suspendlabel					;\
2299	  nop
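/*
 * End to end, GET_TTE is roughly the assembly analogue of this C
 * outline (control transfers shown as gotos to the label parameters;
 * a sketch only, with pa_read64() again standing in for ASI_MEM loads):
 *
 *	hmebp = HMEHASH_FUNC_ASM(tagacc, hatid, hmeshift);
 *	hmeblkpa = HMEHASH_SEARCH(hmebp, hblktag, hatid);
 *	if (hmeblkpa == HMEBLK_ENDPA)
 *		goto exitlabel;		// try the next hash, or fault
 *	tte = pa_read64(hmeblkpa + hmentoff + SFHME_TTE);
 *	if (tte & TTE_SUSPEND)
 *		goto suspendlabel;
 *	goto foundlabel;
 */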
2300
2301/*
2302 * GET_SHME_TTE is similar to GET_TTE() except it searches
2303 * shared hmeblks via HMEHASH_SEARCH_SHME() macro.
2304 * If valid tte is found, hmemisc = shctx flag, i.e., shme is
2305 * either 0 (not part of scd) or 1 (part of scd).
2306 */
2307#define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, 	\
2308		hmeshift, hashno, tmp, label, foundlabel,		\
2309		suspendlabel, exitlabel)				\
2310									;\
2311	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2312	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2313	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2314		hmeblkpa, label/**/5, hmemisc, tmp)			;\
2315									;\
2316	/*								;\
2317	 * tagacc = tagacc						;\
2318	 * hatid = hatid						;\
2319	 * tsbarea = tsbarea						;\
2320	 * tte   = hmebp (hme bucket pointer)				;\
2321	 * hmeblkpa  = vapg  (virtual page)				;\
2322	 * hmemisc, tmp = scratch					;\
2323	 */								;\
2324	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2325									;\
2326	/*								;\
2327	 * tagacc = tagacc						;\
2328	 * hatid = hatid						;\
2329	 * tsbarea = tsbarea						;\
2330	 * tte   = hmebp						;\
2331	 * hmemisc  = htag_bspage + hashno + 0 (for rid)		;\
2332	 * hmeblkpa  = CLOBBERED					;\
2333	 * tmp = scratch						;\
2334	 */								;\
2335	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2336									;\
2337	add     tte, HMEBUCK_NEXTPA, hmeblkpa				;\
2338	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2339	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte)			;\
2340									;\
2341label/**/8:								;\
2342	HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa,			\
2343		tsbarea, tagacc, tte, label/**/1)			;\
2344	/*								;\
2345	 * tagacc = CLOBBERED						;\
2346	 * tte = CLOBBERED						;\
2347	 * hmeblkpa = hmeblkpa						;\
2348	 * tmp = scratch						;\
2349	 */								;\
2350	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2351	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
2352	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2353	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2354	  nop								;\
2355label/**/4:								;\
2356	/*								;\
2357	 * We have found the hmeblk containing the hment.		;\
2358	 * Now we calculate the corresponding tte.			;\
2359	 *								;\
2360	 * tagacc = tagacc						;\
2361	 * hatid = hatid						;\
2362	 * tte   = clobbered						;\
2363	 * hmeblkpa  = hmeblkpa						;\
2364	 * hmemisc  = hblktag						;\
2365	 * tsbarea = tsbmiss area					;\
2366	 * tmp = scratch						;\
2367	 */								;\
2368	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2369		label/**/2)						;\
2370									;\
2371	/*								;\
2372	 * tagacc = tagacc						;\
2373	 * hatid = hmentoff						;\
2374	 * tte = clobbered						;\
2375	 * hmeblkpa  = hmeblkpa						;\
2376	 * hmemisc  = hblk_misc						;\
2377	 * tsbarea = tsbmiss area					;\
2378	 * tmp = scratch						;\
2379	 */								;\
2380									;\
2381	add	hatid, SFHME_TTE, hatid					;\
2382	add	hmeblkpa, hatid, hmeblkpa				;\
2383	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2384	brlz,pt tte, label/**/6						;\
2385	  nop								;\
2386	btst	HBLK_SZMASK, hmemisc					;\
2387	bnz,a,pt %icc, label/**/7					;\
2388	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2389									;\
2390	/*								;\
2391 	 * We found an invalid 8K tte in shme.				;\
2392	 * It may not belong to shme's region since			;\
2393	 * region size/alignment granularity is 8K but different	;\
2394	 * regions don't share hmeblks. Continue the search.		;\
2395	 */								;\
2396	sub	hmeblkpa, hatid, hmeblkpa				;\
2397	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2398	srlx	tagacc, hmeshift, tte					;\
2399	add	hmeblkpa, HMEBLK_NEXTPA, hmeblkpa			;\
2400	ldxa	[hmeblkpa]ASI_MEM, hmeblkpa				;\
2401	MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc)		;\
2402	ba,a,pt	%xcc, label/**/8					;\
2403label/**/6:								;\
2404	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)		;\
2405	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2406label/**/7:								;\
2407	set	TTE_SUSPEND, hatid					;\
2408	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2409	btst	tte, hatid						;\
2410	bz,pt	%xcc, foundlabel					;\
2411	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2412									;\
2413	/*								;\
2414	 * Mapping is suspended, so goto suspend label.			;\
2415	 */								;\
2416	ba,pt	%xcc, suspendlabel					;\
2417	  nop
2418
2419	/*
2420	 * KERNEL PROTECTION HANDLER
2421	 *
2422	 * g1 = tsb8k pointer register (clobbered)
2423	 * g2 = tag access register (ro)
2424	 * g3 - g7 = scratch registers
2425	 *
2426	 * Note: This function is patched at runtime for performance reasons.
2427	 * 	 Any changes here require sfmmu_patch_ktsb to be updated as well.
2428	 */
2429	ENTRY_NP(sfmmu_kprot_trap)
2430	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2431sfmmu_kprot_patch_ktsb_base:
2432	RUNTIME_PATCH_SETX(%g1, %g6)
2433	/* %g1 = contents of ktsb_base or ktsb_pbase */
2434sfmmu_kprot_patch_ktsb_szcode:
2435	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2436
2437	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2438	! %g1 = First TSB entry pointer, as TSB miss handler expects
2439
2440	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2441sfmmu_kprot_patch_ktsb4m_base:
2442	RUNTIME_PATCH_SETX(%g3, %g6)
2443	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2444sfmmu_kprot_patch_ktsb4m_szcode:
2445	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2446
2447	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2448	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2449
2450        CPU_TSBMISS_AREA(%g6, %g7)
2451        HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2452	ba,pt	%xcc, sfmmu_tsb_miss_tt
2453	  nop
2454
2455	/*
2456	 * USER PROTECTION HANDLER
2457	 *
2458	 * g1 = tsb8k pointer register (ro)
2459	 * g2 = tag access register (ro)
2460	 * g3 = faulting context (clobbered, currently not used)
2461	 * g4 - g7 = scratch registers
2462	 */
2463	ALTENTRY(sfmmu_uprot_trap)
2464#ifdef sun4v
2465	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2466	/* %g1 = first TSB entry ptr now, %g2 preserved */
2467
2468	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2469	brlz,pt %g3, 9f				/* check for 2nd TSB */
2470	  nop
2471
2472	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2473	/* %g3 = second TSB entry ptr now, %g2 preserved */
2474
2475#else /* sun4v */
2476#ifdef UTSB_PHYS
2477	/* g1 = first TSB entry ptr */
2478	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2479	brlz,pt %g3, 9f			/* check for 2nd TSB */
2480	  nop
2481
2482	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2483	/* %g3 = second TSB entry ptr now, %g2 preserved */
2484#else /* UTSB_PHYS */
2485	brgez,pt %g1, 9f		/* check for 2nd TSB */
2486	  mov	-1, %g3			/* set second tsbe ptr to -1 */
2487
2488	mov	%g2, %g7
2489	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2490	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2491	mov	%g1, %g7
2492	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2493#endif /* UTSB_PHYS */
2494#endif /* sun4v */
24959:
2496	CPU_TSBMISS_AREA(%g6, %g7)
2497	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2498	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2499	  nop
2500
2501	/*
2502	 * Kernel 8K page iTLB miss.  We also get here if we took a
2503	 * fast instruction access mmu miss trap while running in
2504	 * an invalid context.
2505	 *
2506	 * %g1 = 8K TSB pointer register (not used, clobbered)
2507	 * %g2 = tag access register (used)
2508	 * %g3 = faulting context id (used)
2509	 * %g7 = TSB tag to match (used)
2510	 */
2511	.align	64
2512	ALTENTRY(sfmmu_kitlb_miss)
2513	brnz,pn %g3, tsb_tl0_noctxt
2514	  nop
2515
2516	/* kernel miss */
2517	/* get kernel tsb pointer */
2518	/* we patch the next set of instructions at run time */
2519	/* NOTE: any changes here require sfmmu_patch_ktsb to be updated */
2520iktsbbase:
2521	RUNTIME_PATCH_SETX(%g4, %g5)
2522	/* %g4 = contents of ktsb_base or ktsb_pbase */
2523
2524iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2525	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2526	or	%g4, %g1, %g1			! form tsb ptr
2527	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2528	cmp	%g4, %g7
2529	bne,pn	%xcc, iktsb4mbase		! check 4m ktsb
2530	  srlx    %g2, MMU_PAGESHIFT4M, %g3	! use 4m virt-page as TSB index
2531
2532	andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2533	bz,pn	%icc, exec_fault
2534	  nop
2535	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2536	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2537	retry
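	/*
	 * The patched sllx/srlx pair above is GET_TSBE_POINTER in two
	 * instructions: the left shift discards everything above the
	 * index field and the right shift lands the index pre-scaled
	 * by the entry size.  In C terms (szc is the patched TSB size
	 * code):
	 *
	 *	nentries = TSB_ENTRIES(0) << szc;
	 *	idx = (va >> MMU_PAGESHIFT) & (nentries - 1);
	 *	tsbep = ktsb_base + (idx << TSB_ENTRY_SHIFT);
	 */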
2538
2539iktsb4mbase:
2540        RUNTIME_PATCH_SETX(%g4, %g6)
2541        /* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
2542iktsb4m:
2543	sllx    %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2544        srlx    %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2545	add	%g4, %g3, %g3			! %g3 = 4m tsbe ptr
2546	ldda	[%g3]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2547	cmp	%g4, %g7
2548	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2549	  andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2550	bz,pn	%icc, exec_fault
2551	  nop
2552	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2553	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2554	retry
2555
2556	/*
2557	 * Kernel dTLB miss.  We also get here if we took a fast data
2558	 * access mmu miss trap while running in an invalid context.
2559	 *
2560	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2561	 *	We select the TSB miss handler to branch to depending on
2562	 *	the virtual address of the access.  In the future it may
2563	 *	be desirable to separate kpm TTEs into their own TSB,
2564	 *	in which case all that needs to be done is to set
2565	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2566	 *	early in the miss if we detect a kpm VA to a new handler.
2567	 *
2568	 * %g1 = 8K TSB pointer register (not used, clobbered)
2569	 * %g2 = tag access register (used)
2570	 * %g3 = faulting context id (used)
2571	 */
2572	.align	64
2573	ALTENTRY(sfmmu_kdtlb_miss)
2574	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2575	  nop
2576
2577	/* Gather some stats for kpm misses in the TLB. */
2578	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2579	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2580
2581	/*
2582	 * Get first TSB offset and look for 8K/64K/512K mapping
2583	 * using the 8K virtual page as the index.
2584	 *
2585	 * We patch the next set of instructions at run time;
2586	 * any changes here require sfmmu_patch_ktsb changes too.
2587	 */
2588dktsbbase:
2589	RUNTIME_PATCH_SETX(%g7, %g6)
2590	/* %g7 = contents of ktsb_base or ktsb_pbase */
2591
2592dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2593	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2594
2595	/*
2596	 * At this point %g1 is our index into the TSB.
2597	 * We just masked off enough bits of the VA depending
2598	 * on our TSB size code.
2599	 */
2600	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2601	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2602	cmp	%g6, %g4			! compare tag
2603	bne,pn	%xcc, dktsb4m_kpmcheck_small
2604	  add	%g7, %g1, %g1			/* form tsb ptr */
2605	TT_TRACE(trace_tsbhit)
2606	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2607	/* trapstat expects tte in %g5 */
2608	retry
2609
2610	/*
2611	 * If kpm is using large pages, the following instruction needs
2612	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2613	 * so that we will probe the 4M TSB regardless of the VA.  In
2614	 * the case kpm is using small pages, we know no large kernel
2615	 * mappings are located above 0x80000000.00000000 so we skip the
2616	 * probe as an optimization.
2617	 */
2618dktsb4m_kpmcheck_small:
2619	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2620	  /* delay slot safe, below */
2621
2622	/*
2623	 * Get second TSB offset and look for 4M mapping
2624	 * using 4M virtual page as the TSB index.
2625	 *
2626	 * Here:
2627	 * %g1 = 8K TSB pointer.  Don't squash it.
2628	 * %g2 = tag access register (we still need it)
2629	 */
2630	srlx	%g2, MMU_PAGESHIFT4M, %g3
2631
2632	/*
2633	 * We patch the next set of instructions at run time;
2634	 * any changes here require sfmmu_patch_ktsb changes too.
2635	 */
2636dktsb4mbase:
2637	RUNTIME_PATCH_SETX(%g7, %g6)
2638	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2639dktsb4m:
2640	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2641	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2642
2643	/*
2644	 * At this point %g3 is our index into the TSB.
2645	 * We just masked off enough bits of the VA depending
2646	 * on our TSB size code.
2647	 */
2648	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2649	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2650	cmp	%g6, %g4			! compare tag
2651
2652dktsb4m_tsbmiss:
2653	bne,pn	%xcc, dktsb4m_kpmcheck
2654	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2655	TT_TRACE(trace_tsbhit)
2656	/* we don't check TTE size here since we assume 4M TSB is separate */
2657	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2658	/* trapstat expects tte in %g5 */
2659	retry
2660
2661	/*
2662	 * So, we failed to find a valid TTE to match the faulting
2663	 * address in either TSB.  There are a few cases that could land
2664	 * us here:
2665	 *
2666	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2667	 *    to sfmmu_tsb_miss_tt to handle the miss.
2668	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2669	 *    4M TSB.  Let segkpm handle it.
2670	 *
2671	 * Note that we shouldn't land here in the case of a kpm VA when
2672	 * kpm_smallpages is active -- we handled that case earlier at
2673	 * dktsb4m_kpmcheck_small.
2674	 *
2675	 * At this point:
2676	 *  g1 = 8K-indexed primary TSB pointer
2677	 *  g2 = tag access register
2678	 *  g3 = 4M-indexed secondary TSB pointer
2679	 */
2680dktsb4m_kpmcheck:
2681	cmp	%g2, %g0
2682	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2683	  nop
2684	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2685	  nop
2686
2687#ifdef sun4v
2688	/*
2689	 * User instruction miss w/ single TSB.
2690	 * The first probe covers 8K, 64K, and 512K page sizes,
2691	 * because 64K and 512K mappings are replicated off 8K
2692	 * pointer.
2693	 *
2694	 * g1 = tsb8k pointer register
2695	 * g2 = tag access register
2696	 * g3 - g6 = scratch registers
2697	 * g7 = TSB tag to match
2698	 */
2699	.align	64
2700	ALTENTRY(sfmmu_uitlb_fastpath)
2701
2702	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2703	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2704	ba,pn	%xcc, sfmmu_tsb_miss_tt
2705	  mov	-1, %g3
2706
2707	/*
2708	 * User data miss w/ single TSB.
2709	 * The first probe covers 8K, 64K, and 512K page sizes,
2710	 * because 64K and 512K mappings are replicated off 8K
2711	 * pointer.
2712	 *
2713	 * g1 = tsb8k pointer register
2714	 * g2 = tag access register
2715	 * g3 - g6 = scratch registers
2716	 * g7 = TSB tag to match
2717	 */
2718	.align 64
2719	ALTENTRY(sfmmu_udtlb_fastpath)
2720
2721	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2722	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2723	ba,pn	%xcc, sfmmu_tsb_miss_tt
2724	  mov	-1, %g3
2725
2726	/*
2727	 * User instruction miss w/ multiple TSBs (sun4v).
2728	 * The first probe covers 8K, 64K, and 512K page sizes,
2729	 * because 64K and 512K mappings are replicated off 8K
2730	 * pointer.  Second probe covers 4M page size only.
2731	 *
2732	 * Just like sfmmu_udtlb_slowpath, except:
2733	 *   o Uses ASI_ITLB_IN
2734	 *   o checks for execute permission
2735	 *   o No ISM prediction.
2736	 *
2737	 * g1 = tsb8k pointer register
2738	 * g2 = tag access register
2739	 * g3 - g6 = scratch registers
2740	 * g7 = TSB tag to match
2741	 */
2742	.align	64
2743	ALTENTRY(sfmmu_uitlb_slowpath)
2744
2745	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2746	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2747	/* g4 - g5 = clobbered here */
2748
2749	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2750	/* g1 = first TSB pointer, g3 = second TSB pointer */
2751	srlx	%g2, TAG_VALO_SHIFT, %g7
2752	PROBE_2ND_ITSB(%g3, %g7)
2753	/* NOT REACHED */
2754
2755#else /* sun4v */
2756
2757	/*
2758	 * User instruction miss w/ multiple TSBs (sun4u).
2759	 * The first probe covers 8K, 64K, and 512K page sizes,
2760	 * because 64K and 512K mappings are replicated off 8K
2761	 * pointer.  Probe of 1st TSB has already been done prior to entry
2762	 * into this routine. For the UTSB_PHYS case we probe up to 3
2763	 * valid other TSBs in the following order:
2764	 * 1) shared TSB for 4M-256M pages
2765	 * 2) private TSB for 4M-256M pages
2766	 * 3) shared TSB for 8K-512K pages
2767	 *
2768	 * For the non UTSB_PHYS case we probe the 2nd TSB here that backs
2769	 * 4M-256M pages.
2770	 *
2771	 * Just like sfmmu_udtlb_slowpath, except:
2772	 *   o Uses ASI_ITLB_IN
2773	 *   o checks for execute permission
2774	 *   o No ISM prediction.
2775	 *
2776	 * g1 = tsb8k pointer register
2777	 * g2 = tag access register
2778	 * g4 - g6 = scratch registers
2779	 * g7 = TSB tag to match
2780	 */
2781	.align	64
2782	ALTENTRY(sfmmu_uitlb_slowpath)
2783
2784#ifdef UTSB_PHYS
2785
2786       GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2787        brlz,pt %g6, 1f
2788          nop
2789        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2790        PROBE_4TH_ITSB(%g6, %g7, uitlb_4m_scd_probefail)
27911:
2792        GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2793        brlz,pt %g3, 2f
2794          nop
2795        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2796        PROBE_2ND_ITSB(%g3, %g7, uitlb_4m_probefail)
27972:
2798        GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2799        brlz,pt %g6, sfmmu_tsb_miss_tt
2800          nop
2801        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2802        PROBE_3RD_ITSB(%g6, %g7, uitlb_8K_scd_probefail)
2803        ba,pn   %xcc, sfmmu_tsb_miss_tt
2804          nop
2805
2806#else /* UTSB_PHYS */
2807	mov	%g1, %g3	/* save tsb8k reg in %g3 */
2808	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
2809	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2810	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
2811	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
2812	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
2813       /* g1 = first TSB pointer, g3 = second TSB pointer */
2814        srlx    %g2, TAG_VALO_SHIFT, %g7
2815        PROBE_2ND_ITSB(%g3, %g7, isynth)
2816	ba,pn	%xcc, sfmmu_tsb_miss_tt
2817	  nop
2818
2819#endif /* UTSB_PHYS */
2820#endif /* sun4v */
2821
2822#if defined(sun4u) && defined(UTSB_PHYS)
2823
2824        /*
2825	 * We come here for the ISM-predict DTLB_MISS case or if the
2826	 * probe in the first TSB failed.
2827         */
2828
2829        .align 64
2830        ALTENTRY(sfmmu_udtlb_slowpath_noismpred)
2831
2832	/*
2833         * g1 = tsb8k pointer register
2834         * g2 = tag access register
2835         * g4 - %g6 = scratch registers
2836         * g7 = TSB tag to match
2837	 */
2838
2839	/*
2840	 * ISM non-predict probe order
2841         * probe 1ST_TSB (8K index)
2842         * probe 2ND_TSB (4M index)
2843         * probe 4TH_TSB (4M index)
2844         * probe 3RD_TSB (8K index)
2845	 *
2846	 * We already probed first TSB in DTLB_MISS handler.
2847	 */
2848
2849        /*
2850         * Private 2ND TSB 4M-256M pages
2851         */
2852	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2853	brlz,pt %g3, 1f
2854	  nop
2855        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2856        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2857
2858	/*
2859	 * Shared Context 4TH TSB 4M-256M pages
2860	 */
28611:
2862	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2863	brlz,pt %g6, 2f
2864	  nop
2865        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2866        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail)
2867
2868        /*
2869         * Shared Context 3RD TSB 8K-512K pages
2870         */
28712:
2872	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2873	brlz,pt %g6, sfmmu_tsb_miss_tt
2874	  nop
2875        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2876        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail)
2877	ba,pn	%xcc, sfmmu_tsb_miss_tt
2878	  nop
2879
2880	.align 64
2881        ALTENTRY(sfmmu_udtlb_slowpath_ismpred)
2882
2883	/*
2884         * g1 = tsb8k pointer register
2885         * g2 = tag access register
2886         * g4 - g6 = scratch registers
2887         * g7 = TSB tag to match
2888	 */
2889
2890	/*
2891	 * ISM predict probe order
2892	 * probe 4TH_TSB (4M index)
2893	 * probe 2ND_TSB (4M index)
2894	 * probe 1ST_TSB (8K index)
2895	 * probe 3RD_TSB (8K index)
2896	 */
2897	/*
2898	 * Shared Context 4TH TSB 4M-256M pages
2899	 */
2900	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2901	brlz,pt %g6, 4f
2902	  nop
2903        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2904        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail2)
2905
2906        /*
2907         * Private 2ND TSB 4M-256M pages
2908         */
29094:
2910	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2911	brlz,pt %g3, 5f
2912	  nop
2913        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2914        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail2)
2915
29165:
2917        PROBE_1ST_DTSB(%g1, %g7, udtlb_8k_first_probefail2)
2918
2919        /*
2920         * Shared Context 3RD TSB 8K-512K pages
2921         */
2922	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2923	brlz,pt %g6, 6f
2924	  nop
2925        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2926        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail2)
29276:
2928	ba,pn	%xcc, sfmmu_tsb_miss_tt /* ISM Predict and ISM non-predict path */
2929	  nop
2930
2931#else /* sun4u && UTSB_PHYS */
2932
2933       .align 64
2934        ALTENTRY(sfmmu_udtlb_slowpath)
2935
2936	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
2937	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
2938	  mov	%g1, %g3
2939
2940udtlb_miss_probefirst:
2941	/*
2942	 * g1 = 8K TSB pointer register
2943	 * g2 = tag access register
2944	 * g3 = (potentially) second TSB entry ptr
2945	 * g6 = ism pred.
2946	 * g7 = vpg_4m
2947	 */
2948#ifdef sun4v
2949	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2950	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2951
2952	/*
2953	 * Here:
2954	 *   g1 = first TSB pointer
2955	 *   g2 = tag access reg
2956	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2957	 */
2958	brgz,pn	%g6, sfmmu_tsb_miss_tt
2959	  nop
2960#else /* sun4v */
2961	mov	%g1, %g4
2962	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
2963	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2964
2965	/*
2966	 * Here:
2967	 *   g1 = first TSB pointer
2968	 *   g2 = tag access reg
2969	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2970	 */
2971	brgz,pn	%g6, sfmmu_tsb_miss_tt
2972	  nop
2973	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
2974	/* fall through in 8K->4M probe order */
2975#endif /* sun4v */
2976
2977udtlb_miss_probesecond:
2978	/*
2979	 * Look in the second TSB for the TTE
2980	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
2981	 * g2 = tag access reg
2982	 * g3 = 8K TSB pointer register
2983	 * g6 = ism pred.
2984	 * g7 = vpg_4m
2985	 */
2986#ifdef sun4v
2987	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
2988	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2989	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
2990#else /* sun4v */
2991	mov	%g3, %g7
2992	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
2993	/* %g2 clobbered, %g3 =second tsbe ptr */
2994	mov	MMU_TAG_ACCESS, %g2
2995	ldxa	[%g2]ASI_DMMU, %g2
2996#endif /* sun4v */
2997
2998	srlx	%g2, TAG_VALO_SHIFT, %g7
2999	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
3000	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
3001	brgz,pn	%g6, udtlb_miss_probefirst
3002	  nop
3003
3004	/* fall through to sfmmu_tsb_miss_tt */
3005#endif /* sun4u && UTSB_PHYS */
3006
3007
3008	ALTENTRY(sfmmu_tsb_miss_tt)
3009	TT_TRACE(trace_tsbmiss)
3010	/*
3011	 * We get here if there is a TSB miss OR a write protect trap.
3012	 *
3013	 * g1 = First TSB entry pointer
3014	 * g2 = tag access register
3015	 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
3016	 * g4 - g7 = scratch registers
3017	 */
3018
3019	ALTENTRY(sfmmu_tsb_miss)
3020
3021	/*
3022	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3023	 * point to trapstat's TSB miss return code (note that trapstat
3024	 * itself will patch the correct offset to add).
3025	 */
3026	rdpr	%tl, %g7
3027	cmp	%g7, 1
3028	ble,pt	%xcc, 0f
3029	  sethi	%hi(KERNELBASE), %g6
3030	rdpr	%tpc, %g7
3031	or	%g6, %lo(KERNELBASE), %g6
3032	cmp	%g7, %g6
3033	bgeu,pt	%xcc, 0f
3034	/* delay slot safe */
3035
3036	ALTENTRY(tsbmiss_trapstat_patch_point)
3037	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3038	wrpr	%g7, %tpc
3039	add	%g7, 4, %g7
3040	wrpr	%g7, %tnpc
30410:
3042	CPU_TSBMISS_AREA(%g6, %g7)
3043	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save 1ST tsb pointer */
3044	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save 2ND tsb pointer */
3045
3046	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
3047	brz,a,pn %g3, 1f			/* skip ahead if kernel */
3048	  ldn	[%g6 + TSBMISS_KHATID], %g7
3049	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
3050	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
3051
3052	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
3053
3054	cmp	%g3, INVALID_CONTEXT
3055	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
3056	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
3057
3058#if defined(sun4v) || defined(UTSB_PHYS)
3059        ldub    [%g6 + TSBMISS_URTTEFLAGS], %g7	/* clear ctx1 flag set from */
3060        andn    %g7, HAT_CHKCTX1_FLAG, %g7	/* the previous tsb miss    */
3061        stub    %g7, [%g6 + TSBMISS_URTTEFLAGS]
3062#endif /* sun4v || UTSB_PHYS */
3063
3064	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
3065	/*
3066	 * The miss wasn't in an ISM segment.
3067	 *
3068	 * %g1, %g3, %g4, %g5, %g7 all clobbered
3069	 * %g2 = (pseudo) tag access
3070	 */
3071
3072	ba,pt	%icc, 2f
3073	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
3074
30751:
3076	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
3077	/*
3078	 * 8K and 64K hash.
3079	 */
30802:
3081
3082	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3083		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_l8K, tsb_checktte,
3084		sfmmu_suspend_tl, tsb_512K)
3085	/* NOT REACHED */
3086
3087tsb_512K:
3088	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3089	brz,pn	%g5, 3f
3090	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3091	and	%g4, HAT_512K_FLAG, %g5
3092
3093	/*
3094	 * Note that there is a small window here where we may have
3095	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
3096	 * flag yet, so we will skip searching the 512k hash list.
3097	 * In this case we will end up in pagefault which will find
3098	 * the mapping and return.  So, in this instance we will end up
3099	 * spending a bit more time resolving this TSB miss, but it can
3100	 * only happen once per process and even then, the chances of that
3101	 * are very small, so it's not worth the extra overhead it would
3102	 * take to close this window.
3103	 */
3104	brz,pn	%g5, tsb_4M
3105	  nop
31063:
3107	/*
3108	 * 512K hash
3109	 */
3110
3111	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3112		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_l512K, tsb_checktte,
3113		sfmmu_suspend_tl, tsb_4M)
3114	/* NOT REACHED */
3115
3116tsb_4M:
3117	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3118	brz,pn	%g5, 4f
3119	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3120	and	%g4, HAT_4M_FLAG, %g5
3121	brz,pn	%g5, tsb_32M
3122	  nop
31234:
3124	/*
3125	 * 4M hash
3126	 */
3127
3128	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3129		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_l4M, tsb_checktte,
3130		sfmmu_suspend_tl, tsb_32M)
3131	/* NOT REACHED */
3132
3133tsb_32M:
3134	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3135#ifdef	sun4v
3136        brz,pn	%g5, 6f
3137#else
3138	brz,pn  %g5, tsb_pagefault
3139#endif
3140	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3141	and	%g4, HAT_32M_FLAG, %g5
3142	brz,pn	%g5, tsb_256M
3143	  nop
31445:
3145	/*
3146	 * 32M hash
3147	 */
3148
3149	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3150		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_l32M, tsb_checktte,
3151		sfmmu_suspend_tl, tsb_256M)
3152	/* NOT REACHED */
3153
3154#if defined(sun4u) && !defined(UTSB_PHYS)
3155#define tsb_shme        tsb_pagefault
3156#endif
3157tsb_256M:
3158	ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3159	and	%g4, HAT_256M_FLAG, %g5
3160	brz,pn	%g5, tsb_shme
3161	  nop
31626:
3163	/*
3164	 * 256M hash
3165	 */
3166
3167	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3168	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_l256M, tsb_checktte,
3169	    sfmmu_suspend_tl, tsb_shme)
3170	/* NOT REACHED */
3171
3172tsb_checktte:
3173	/*
3174	 * g1 = hblk_misc
3175	 * g2 = tagacc
3176	 * g3 = tte
3177	 * g4 = tte pa
3178	 * g6 = tsbmiss area
3179	 * g7 = hatid
3180	 */
3181	brlz,a,pt %g3, tsb_validtte
3182	  rdpr	%tt, %g7
3183
3184#if defined(sun4u) && !defined(UTSB_PHYS)
3185#undef tsb_shme
3186	ba      tsb_pagefault
3187	  nop
3188#else /* sun4u && !UTSB_PHYS */
3189
3190tsb_shme:
3191	/*
3192	 * g2 = tagacc
3193	 * g6 = tsbmiss area
3194	 */
3195	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3196	brz,pn	%g5, tsb_pagefault
3197	  nop
3198	ldx	[%g6 + TSBMISS_SHARED_UHATID], %g7	/* g7 = srdp */
3199	brz,pn	%g7, tsb_pagefault
3200	  nop
3201
3202	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3203		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_shme_l8K, tsb_shme_checktte,
3204		sfmmu_suspend_tl, tsb_shme_512K)
3205	/* NOT REACHED */
3206
3207tsb_shme_512K:
3208	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3209	and	%g4, HAT_512K_FLAG, %g5
3210	brz,pn	%g5, tsb_shme_4M
3211	  nop
3212
3213	/*
3214	 * 512K hash
3215	 */
3216
3217	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3218		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_shme_l512K, tsb_shme_checktte,
3219		sfmmu_suspend_tl, tsb_shme_4M)
3220	/* NOT REACHED */
3221
3222tsb_shme_4M:
3223	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3224	and	%g4, HAT_4M_FLAG, %g5
3225	brz,pn	%g5, tsb_shme_32M
3226	  nop
32274:
3228	/*
3229	 * 4M hash
3230	 */
3231	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3232		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_shme_l4M, tsb_shme_checktte,
3233		sfmmu_suspend_tl, tsb_shme_32M)
3234	/* NOT REACHED */
3235
3236tsb_shme_32M:
3237	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3238	and	%g4, HAT_32M_FLAG, %g5
3239	brz,pn	%g5, tsb_shme_256M
3240	  nop
3241
3242	/*
3243	 * 32M hash
3244	 */
3245
3246	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3247		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_shme_l32M, tsb_shme_checktte,
3248		sfmmu_suspend_tl, tsb_shme_256M)
3249	/* NOT REACHED */
3250
3251tsb_shme_256M:
3252	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3253	and	%g4, HAT_256M_FLAG, %g5
3254	brz,pn	%g5, tsb_pagefault
3255	  nop
3256
3257	/*
3258	 * 256M hash
3259	 */
3260
3261	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3262	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_shme_l256M, tsb_shme_checktte,
3263	    sfmmu_suspend_tl, tsb_pagefault)
3264	/* NOT REACHED */
3265
3266tsb_shme_checktte:
3267
3268	brgez,pn %g3, tsb_pagefault
3269	  rdpr	%tt, %g7
3270	/*
3271	 * g1 = ctx1 flag
3272	 * g3 = tte
3273	 * g4 = tte pa
3274	 * g6 = tsbmiss area
3275	 * g7 = tt
3276	 */
3277
3278	brz,pt  %g1, tsb_validtte
3279	  nop
3280	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
3281	or	%g1, HAT_CHKCTX1_FLAG, %g1
3282	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
3283
3284	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
3285#endif /* sun4u && !UTSB_PHYS */
3286
3287tsb_validtte:
3288	/*
3289	 * g3 = tte
3290	 * g4 = tte pa
3291	 * g6 = tsbmiss area
3292	 * g7 = tt
3293	 */
3294
3295	/*
3296	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
3297	 */
3298	cmp	%g7, FAST_PROT_TT
3299	bne,pt	%icc, 4f
3300	  nop
3301
3302	TTE_SET_REFMOD_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_refmod,
3303	    tsb_protfault)
3304
3305	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3306#ifdef sun4v
3307	MMU_FAULT_STATUS_AREA(%g7)
3308	ldx	[%g7 + MMFSA_D_ADDR], %g5	/* load fault addr for later */
3309#else /* sun4v */
3310	mov     MMU_TAG_ACCESS, %g5
3311	ldxa    [%g5]ASI_DMMU, %g5
3312#endif /* sun4v */
3313	ba,pt	%xcc, tsb_update_tl1
3314	  nop
33154:
3316	/*
3317	 * If ITLB miss check exec bit.
3318	 * If not set treat as invalid TTE.
3319	 */
3320	cmp     %g7, T_INSTR_MMU_MISS
3321	be,pn	%icc, 5f
3322	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
3323	cmp     %g7, FAST_IMMU_MISS_TT
3324	bne,pt %icc, 3f
3325	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
33265:
3327	bz,pn %icc, tsb_protfault
3328	  nop
3329
33303:
3331	/*
3332	 * Set reference bit if not already set
3333	 */
3334	TTE_SET_REF_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_ref)
3335
3336	/*
3337	 * Now, load into TSB/TLB.  At this point:
3338	 * g3 = tte
3339	 * g4 = patte
3340	 * g6 = tsbmiss area
3341	 */
3342	rdpr	%tt, %g7
3343#ifdef sun4v
3344	MMU_FAULT_STATUS_AREA(%g2)
3345	cmp	%g7, T_INSTR_MMU_MISS
3346	be,a,pt	%icc, 9f
3347	  nop
3348	cmp	%g7, FAST_IMMU_MISS_TT
3349	be,a,pt	%icc, 9f
3350	  nop
3351	add	%g2, MMFSA_D_, %g2
33529:
3353	ldx	[%g2 + MMFSA_CTX_], %g7
3354	sllx	%g7, TTARGET_CTX_SHIFT, %g7
3355	ldx	[%g2 + MMFSA_ADDR_], %g2
3356	mov	%g2, %g5		! load the fault addr for later use
3357	srlx	%g2, TTARGET_VA_SHIFT, %g2
3358	or	%g2, %g7, %g2
3359#else /* sun4v */
3360	mov     MMU_TAG_ACCESS, %g5
3361	cmp     %g7, FAST_IMMU_MISS_TT
3362	be,a,pt %icc, 9f
3363	   ldxa  [%g0]ASI_IMMU, %g2
3364	ldxa    [%g0]ASI_DMMU, %g2
3365	ba,pt   %icc, tsb_update_tl1
3366	   ldxa  [%g5]ASI_DMMU, %g5
33679:
3368	ldxa    [%g5]ASI_IMMU, %g5
3369#endif /* sun4v */
3370
3371tsb_update_tl1:
3372	srlx	%g2, TTARGET_CTX_SHIFT, %g7
3373	brz,pn	%g7, tsb_kernel
3374#ifdef sun4v
3375	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
3376#else  /* sun4v */
3377	  srlx	%g3, TTE_SZ_SHFT, %g7
3378#endif /* sun4v */
3379
3380tsb_user:
3381#ifdef sun4v
3382	cmp	%g7, TTE4M
3383	bge,pn	%icc, tsb_user4m
3384	  nop
3385#else /* sun4v */
3386	cmp	%g7, TTESZ_VALID | TTE4M
3387	be,pn	%icc, tsb_user4m
3388	  srlx	%g3, TTE_SZ2_SHFT, %g7
3389	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
3390#ifdef ITLB_32M_256M_SUPPORT
3391	bnz,pn	%icc, tsb_user4m
3392	  nop
3393#else /* ITLB_32M_256M_SUPPORT */
3394	bnz,a,pn %icc, tsb_user_pn_synth
3395	 nop
3396#endif /* ITLB_32M_256M_SUPPORT */
3397#endif /* sun4v */
3398
3399tsb_user8k:
3400#if defined(sun4v) || defined(UTSB_PHYS)
3401	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3402	and	%g7, HAT_CHKCTX1_FLAG, %g1
3403	brz,a,pn %g1, 1f
3404	  ldn	[%g6 + TSBMISS_TSBPTR], %g1		! g1 = 1ST TSB ptr
3405	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
3406	brlz,a,pn %g1, ptl1_panic			! if no shared 3RD tsb
3407	  mov PTL1_NO_SCDTSB8K, %g1			! panic
3408        GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
34091:
3410#else /* defined(sun4v) || defined(UTSB_PHYS) */
3411	ldn   [%g6 + TSBMISS_TSBPTR], %g1             ! g1 = 1ST TSB ptr
3412#endif /* defined(sun4v) || defined(UTSB_PHYS) */
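	/*
	 * TSB selection above, as a sketch (scd_tsbbase/tsbe_index are
	 * illustrative names, not real symbols): a TTE flagged with
	 * HAT_CHKCTX1_FLAG belongs to a shared region and goes into
	 * the shared 3RD (8K-indexed) TSB; anything else goes into the
	 * private 1ST TSB saved at tsbmiss setup:
	 *
	 *	if (tsbmiss->urtteflags & HAT_CHKCTX1_FLAG)
	 *		tsbep = scd_tsbbase + tsbe_index(fault_va);
	 *	else
	 *		tsbep = tsbmiss->tsbptr;
	 */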
3413
3414#ifndef UTSB_PHYS
3415	mov	ASI_N, %g7	! user TSBs accessed by VA
3416	mov	%g7, %asi
3417#endif /* !UTSB_PHYS */
3418
3419	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l3)
3420
3421	rdpr    %tt, %g5
3422#ifdef sun4v
3423	cmp	%g5, T_INSTR_MMU_MISS
3424	be,a,pn	%xcc, 9f
3425	  mov	%g3, %g5
3426#endif /* sun4v */
3427	cmp	%g5, FAST_IMMU_MISS_TT
3428	be,pn	%xcc, 9f
3429	  mov	%g3, %g5
3430
3431	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3432	! trapstat wants TTE in %g5
3433	retry
34349:
3435	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3436	! trapstat wants TTE in %g5
3437	retry
3438
3439tsb_user4m:
3440#if defined(sun4v) || defined(UTSB_PHYS)
3441	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3442	and	%g7, HAT_CHKCTX1_FLAG, %g1
3443	brz,a,pn %g1, 4f
3444	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = 2ND TSB ptr
3445	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)! g1 = 4TH TSB ptr
3446	brlz,a,pn %g1, 5f				! if no shared 4TH TSB
3447	  nop
3448        GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
3449
3450#else /* defined(sun4v) || defined(UTSB_PHYS) */
3451	ldn   [%g6 + TSBMISS_TSBPTR4M], %g1             ! g1 = 2ND TSB ptr
3452#endif /* defined(sun4v) || defined(UTSB_PHYS) */
34534:
3454	brlz,pn %g1, 5f	/* Check to see if we have 2nd TSB programmed */
3455	  nop
3456
3457#ifndef UTSB_PHYS
3458	mov	ASI_N, %g7	! user TSBs accessed by VA
3459	mov	%g7, %asi
3460#endif /* UTSB_PHYS */
3461
3462        TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l4)
3463
34645:
3465	rdpr    %tt, %g5
3466#ifdef sun4v
3467        cmp     %g5, T_INSTR_MMU_MISS
3468        be,a,pn %xcc, 9f
3469          mov   %g3, %g5
3470#endif /* sun4v */
3471        cmp     %g5, FAST_IMMU_MISS_TT
3472        be,pn   %xcc, 9f
3473        mov     %g3, %g5
3474
3475        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3476        ! trapstat wants TTE in %g5
3477        retry
34789:
3479        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3480        ! trapstat wants TTE in %g5
3481        retry
3482
3483#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3484	/*
3485	 * Panther ITLB synthesis.
3486	 * The Panther 32M and 256M ITLB code simulates these two large page
3487	 * sizes with 4M pages, to provide support for programs, for example
3488	 * Java, that may copy instructions into a 32M or 256M data page and
3489	 * then execute them. The code below generates the 4M pfn bits and
3490	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3491	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3492	 * are ignored by the hardware.
3493	 *
3494	 * Now, load into TSB/TLB.  At this point:
3495	 * g2 = tagtarget
3496	 * g3 = tte
3497	 * g4 = patte
3498	 * g5 = tt
3499	 * g6 = tsbmiss area
3500	 */
3501tsb_user_pn_synth:
3502	rdpr %tt, %g5
3503	cmp    %g5, FAST_IMMU_MISS_TT
3504	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
3505	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
3506	bz,pn %icc, 4b				/* if not, been here before */
3507	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
3508	brlz,a,pn %g1, 5f			/* no 2nd tsb */
3509	  mov	%g3, %g5
3510
3511	mov	MMU_TAG_ACCESS, %g7
3512	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
3513	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
3514
3515	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3516	mov	%g7, %asi
3517	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l5) /* update TSB */
35185:
3519        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3520        retry
3521
3522tsb_user_itlb_synth:
3523	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 =  2ND TSB */
3524
3525	mov	MMU_TAG_ACCESS, %g7
3526	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
3527	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
3528	brlz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
3529	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
3530
3531	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3532	mov	%g7, %asi
3533	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l6) /* update TSB */
35347:
3535	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
3536        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3537        retry
3538#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
3539
3540tsb_kernel:
3541	rdpr	%tt, %g5
3542#ifdef sun4v
3543	cmp	%g7, TTE4M
3544	bge,pn	%icc, 5f
3545#else
3546	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
3547	be,pn	%icc, 5f
3548#endif /* sun4v */
3549	  nop
3550	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8K TSB ptr
3551	ba,pt	%xcc, 6f
3552	  nop
35535:
3554	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4M TSB ptr
3555	brlz,pn	%g1, 3f		/* skip programming if 4M TSB ptr is -1 */
3556	  nop
35576:
3558#ifndef sun4v
3559tsb_kernel_patch_asi:
3560	or	%g0, RUNTIME_PATCH, %g6
3561	mov	%g6, %asi	! XXX avoid writing to %asi !!
3562#endif
3563	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l7)
35643:
3565#ifdef sun4v
3566	cmp	%g5, T_INSTR_MMU_MISS
3567	be,a,pn	%icc, 1f
3568	  mov	%g3, %g5			! trapstat wants TTE in %g5
3569#endif /* sun4v */
3570	cmp	%g5, FAST_IMMU_MISS_TT
3571	be,pn	%icc, 1f
3572	  mov	%g3, %g5			! trapstat wants TTE in %g5
3573	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3574	! trapstat wants TTE in %g5
3575	retry
35761:
3577	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3578	! trapstat wants TTE in %g5
3579	retry
3580
3581tsb_ism:
3582	/*
3583	 * This is an ISM [i|d]tlb miss.  We optimize for largest
3584	 * page size down to smallest.
3585	 *
3586	 * g2 = vaddr + ctx(or ctxtype (sun4v)) aka (pseudo-)tag access
3587	 *	register
3588	 * g3 = ismmap->ism_seg
3589	 * g4 = physical address of ismmap->ism_sfmmu
3590	 * g6 = tsbmiss area
3591	 */
3592	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
3593	brz,a,pn %g7, ptl1_panic		/* if zero, panic */
3594	  mov	PTL1_BAD_ISM, %g1
3595						/* g5 = pa of imap_vb_shift */
3596	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3597	lduba	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
3598	srlx	%g3, %g4, %g3			/* clr size field */
3599	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
3600	sllx    %g3, %g4, %g3                   /* g3 = ism vbase */
3601	and     %g2, %g1, %g4                   /* g4 = ctx number */
3602	andn    %g2, %g1, %g1                   /* g1 = tlb miss vaddr */
3603	sub     %g1, %g3, %g2                   /* g2 = offset in ISM seg */
3604	or      %g2, %g4, %g2                   /* g2 = (pseudo-)tagacc */
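	/*
	 * In C terms the rebasing above is (a sketch; all the inputs
	 * come from the ism map entry just matched):
	 *
	 *	vbase  = (ism_seg >> vb_shift) << vb_shift;
	 *	tagacc = ((va & ~TAGACC_CTX_MASK) - vbase) | ctx;
	 *
	 * i.e. the hash lookups below key on the offset within the ISM
	 * segment rather than on the process virtual address.
	 */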
3605	sub     %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5 /* pa of imap_hatflags */
3606	lduha   [%g5]ASI_MEM, %g4               /* g4 = imap_hatflags */
3607#if defined(sun4v) || defined(UTSB_PHYS)
3608	and     %g4, HAT_CTX1_FLAG, %g5         /* g5 = HAT_CTX1_FLAG bit */
3609	brz,pt %g5, tsb_chk4M_ism
3610	  nop
3611	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g5
3612	or      %g5, HAT_CHKCTX1_FLAG, %g5
3613	stub    %g5, [%g6 + TSBMISS_URTTEFLAGS]
3614	rdpr    %tt, %g5
3615	SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
3616#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3617
3618	/*
3619	 * ISM pages are always locked down.
3620	 * If we can't find the tte then pagefault
3621	 * and let the spt segment driver resolve it.
3622	 *
3623	 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
3624	 * g4 = imap_hatflags
3625	 * g6 = tsb miss area
3626	 * g7 = ISM hatid
3627	 */
3628
3629tsb_chk4M_ism:
3630	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
3631	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3632	  nop
3633
3634tsb_ism_32M:
3635	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3636	brz,pn	%g5, tsb_ism_256M
3637	  nop
3638
3639	/*
3640	 * 32M hash.
3641	 */
3642
3643	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT32M,
3644	    TTE32M, %g5, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3645	    tsb_ism_4M)
3646	/* NOT REACHED */
3647
3648tsb_ism_32M_found:
3649	brlz,a,pt %g3, tsb_validtte
3650	  rdpr	%tt, %g7
3651	ba,pt	%xcc, tsb_ism_4M
3652	  nop
3653
3654tsb_ism_256M:
3655	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3656	brz,a,pn %g5, ptl1_panic
3657	  mov	PTL1_BAD_ISM, %g1
3658
3659	/*
3660	 * 256M hash.
3661	 */
3662	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT256M,
3663	    TTE256M, %g5, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3664	    tsb_ism_4M)
3665
3666tsb_ism_256M_found:
3667	brlz,a,pt %g3, tsb_validtte
3668	  rdpr	%tt, %g7
3669
3670tsb_ism_4M:
3671	/*
3672	 * 4M hash.
3673	 */
3674	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT4M,
3675	    TTE4M, %g5, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3676	    tsb_ism_8K)
3677	/* NOT REACHED */
3678
3679tsb_ism_4M_found:
3680	brlz,a,pt %g3, tsb_validtte
3681	  rdpr	%tt, %g7
3682
3683tsb_ism_8K:
3684	/*
3685	 * 8K and 64K hash.
3686	 */
3687
3688	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT64K,
3689	    TTE64K, %g5, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3690	    tsb_pagefault)
3691	/* NOT REACHED */
3692
3693tsb_ism_8K_found:
3694	brlz,a,pt %g3, tsb_validtte
3695	  rdpr	%tt, %g7
3696
3697tsb_pagefault:
3698	rdpr	%tt, %g7
3699	cmp	%g7, FAST_PROT_TT
3700	be,a,pn	%icc, tsb_protfault
3701	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3702
3703tsb_protfault:
3704	/*
3705	 * we get here if we couldn't find a valid tte in the hash.
3706	 *
3707	 * If this is a user fault at tl>1, we go to the window handling code.
3708	 *
3709	 * If kernel and the fault is on the same page as our stack
3710	 * pointer, then we know the stack is bad and the trap handler
3711	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3712	 *
3713	 * If this is a kernel trap and tl>1, panic.
3714	 *
3715	 * Otherwise we call pagefault.
3716	 */
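	/*
	 * As a C-like sketch of the checks below (illustrative only):
	 *
	 *	if (ctx == 0) {			// kernel fault
	 *		if (btop(fault_va) == btop(%sp + STACK_BIAS))
	 *			ptl1_panic(PTL1_BAD_STACK);
	 *		if (tl > 1)
	 *			ptl1_panic(PTL1_BAD_KPROT_FAULT or
	 *			    PTL1_BAD_KMISS);
	 *		sfmmu_pagefault();	// modulo DTrace, see below
	 *	} else {			// user fault
	 *		if (tl > 1)
	 *			sfmmu_window_trap();
	 *		sfmmu_mmu_trap();	// modulo DTrace, see below
	 *	}
	 */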
3717	cmp	%g7, FAST_IMMU_MISS_TT
3718#ifdef sun4v
3719	MMU_FAULT_STATUS_AREA(%g4)
3720	ldx	[%g4 + MMFSA_I_CTX], %g5
3721	ldx	[%g4 + MMFSA_D_CTX], %g4
3722	move	%icc, %g5, %g4
3723	cmp	%g7, T_INSTR_MMU_MISS
3724	move	%icc, %g5, %g4
3725#else
3726	mov	MMU_TAG_ACCESS, %g4
3727	ldxa	[%g4]ASI_DMMU, %g2
3728	ldxa	[%g4]ASI_IMMU, %g5
3729	move	%icc, %g5, %g2
3730	cmp	%g7, T_INSTR_MMU_MISS
3731	move	%icc, %g5, %g2
3732	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
3733#endif /* sun4v */
3734	brnz,pn	%g4, 3f				/* skip if not kernel */
3735	  rdpr	%tl, %g5
3736
3737	add	%sp, STACK_BIAS, %g3
3738	srlx	%g3, MMU_PAGESHIFT, %g3
3739	srlx	%g2, MMU_PAGESHIFT, %g4
3740	cmp	%g3, %g4
3741	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
3742	  mov	PTL1_BAD_STACK, %g1
3743
3744	cmp	%g5, 1
3745	ble,pt	%icc, 2f
3746	  nop
3747	TSTAT_CHECK_TL1(2f, %g1, %g2)
3748	rdpr	%tt, %g2
3749	cmp	%g2, FAST_PROT_TT
3750	mov	PTL1_BAD_KPROT_FAULT, %g1
3751	movne	%icc, PTL1_BAD_KMISS, %g1
3752	ba,pt	%icc, ptl1_panic
3753	  nop
3754
37552:
3756	/*
3757	 * We are taking a pagefault in the kernel on a kernel address.  If
3758	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
3759	 * want to call sfmmu_pagefault -- we will instead note that a fault
3760	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
3761	 * (instead of a "retry").  This will step over the faulting
3762	 * instruction.
3763	 */
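	/*
	 * A C-level sketch of the flag dance below (cpu_core and the
	 * cpuc_dtrace_* fields are the DTrace per-CPU state used here;
	 * the rendering itself is illustrative):
	 *
	 *	cpu_core_t *cp = &cpu_core[CPU->cpu_id];
	 *	if (!(cp->cpuc_dtrace_flags & CPU_DTRACE_NOFAULT))
	 *		sfmmu_pagefault();
	 *	cp->cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *	cp->cpuc_dtrace_illval = fault_va;
	 *	// "done" then steps over the faulting instruction
	 */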
3764	CPU_INDEX(%g1, %g2)
3765	set	cpu_core, %g2
3766	sllx	%g1, CPU_CORE_SHIFT, %g1
3767	add	%g1, %g2, %g1
3768	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3769	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3770	bz	sfmmu_pagefault
3771	or	%g2, CPU_DTRACE_BADADDR, %g2
3772	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3773	GET_MMU_D_ADDR(%g3, %g4)
3774	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3775	done
3776
37773:
3778	cmp	%g5, 1
3779	ble,pt	%icc, 4f
3780	  nop
3781	TSTAT_CHECK_TL1(4f, %g1, %g2)
3782	ba,pt	%icc, sfmmu_window_trap
3783	  nop
3784
37854:
3786	/*
3787	 * We are taking a pagefault on a non-kernel address.  If we are in
3788	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3789	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3790	 */
3791	CPU_INDEX(%g1, %g2)
3792	set	cpu_core, %g2
3793	sllx	%g1, CPU_CORE_SHIFT, %g1
3794	add	%g1, %g2, %g1
3795	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3796	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3797	bz	sfmmu_mmu_trap
3798	or	%g2, CPU_DTRACE_BADADDR, %g2
3799	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3800	GET_MMU_D_ADDR(%g3, %g4)
3801	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3802
3803	/*
3804	 * Be sure that we're actually taking this miss from the kernel --
3805	 * otherwise we have managed to return to user-level with
3806	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3807	 */
3808	rdpr	%tstate, %g2
3809	btst	TSTATE_PRIV, %g2
3810	bz,a	ptl1_panic
3811	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3812	done
3813
3814	ALTENTRY(tsb_tl0_noctxt)
3815	/*
3816	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
3817	 * if it is, indicate that we have faulted and issue a done.
3818	 */
3819	CPU_INDEX(%g5, %g6)
3820	set	cpu_core, %g6
3821	sllx	%g5, CPU_CORE_SHIFT, %g5
3822	add	%g5, %g6, %g5
3823	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
3824	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
3825	bz	1f
3826	or	%g6, CPU_DTRACE_BADADDR, %g6
3827	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
3828	GET_MMU_D_ADDR(%g3, %g4)
3829	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
3830
3831	/*
3832	 * Be sure that we're actually taking this miss from the kernel --
3833	 * otherwise we have managed to return to user-level with
3834	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3835	 */
3836	rdpr	%tstate, %g5
3837	btst	TSTATE_PRIV, %g5
3838	bz,a	ptl1_panic
3839	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3840	TSTAT_CHECK_TL1(2f, %g1, %g2);
38412:
3842	done
3843
38441:
3845	rdpr	%tt, %g5
3846	cmp	%g5, FAST_IMMU_MISS_TT
3847#ifdef sun4v
3848	MMU_FAULT_STATUS_AREA(%g2)
3849	be,a,pt	%icc, 2f
3850	  ldx	[%g2 + MMFSA_I_CTX], %g3
3851	cmp	%g5, T_INSTR_MMU_MISS
3852	be,a,pt	%icc, 2f
3853	  ldx	[%g2 + MMFSA_I_CTX], %g3
3854	ldx	[%g2 + MMFSA_D_CTX], %g3
38552:
3856#else
3857	mov	MMU_TAG_ACCESS, %g2
3858	be,a,pt	%icc, 2f
3859	  ldxa	[%g2]ASI_IMMU, %g3
3860	ldxa	[%g2]ASI_DMMU, %g3
38612:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
3862#endif /* sun4v */
3863	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
3864	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
3865	rdpr	%tl, %g5
3866	cmp	%g5, 1
3867	ble,pt	%icc, sfmmu_mmu_trap
3868	  nop
3869	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3870	ba,pt	%icc, sfmmu_window_trap
3871	  nop
3872	SET_SIZE(sfmmu_tsb_miss)
3873#endif  /* lint */
3874
3875#if defined (lint)
3876/*
3877 * This routine will look for a user or kernel vaddr in the hash
3878 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
3879 * grab any locks.  It should only be used by other sfmmu routines.
3880 */
3881/* ARGSUSED */
3882pfn_t
3883sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3884{
3885	return(0);
3886}
3887
3888/* ARGSUSED */
3889pfn_t
3890sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
3891{
3892	return(0);
3893}
3894
3895#else /* lint */
3896
3897	ENTRY_NP(sfmmu_vatopfn)
3898 	/*
3899 	 * save %pstate so it can be restored on return
3900 	 */
3901 	rdpr	%pstate, %o3
3902#ifdef DEBUG
3903	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
3904#endif
3905	/*
3906	 * disable interrupts to protect the TSBMISS area
3907	 */
3908	andn    %o3, PSTATE_IE, %o5
3909	wrpr    %o5, 0, %pstate
3910
3911	/*
3912	 * o0 = vaddr
3913	 * o1 = sfmmup
3914	 * o2 = ttep
3915	 */
3916	CPU_TSBMISS_AREA(%g1, %o5)
3917	ldn	[%g1 + TSBMISS_KHATID], %o4
3918	cmp	%o4, %o1
3919	bne,pn	%ncc, vatopfn_nokernel
3920	  mov	TTE64K, %g5			/* g5 = rehash # */
3921	mov	%g1, %o5			/* o5 = tsbmiss_area */
3922	/*
3923	 * o0 = vaddr
3924	 * o1 & o4 = hatid
3925	 * o2 = ttep
3926	 * o5 = tsbmiss area
3927	 */
3928	mov	HBLK_RANGE_SHIFT, %g6
39291:
3930
3931	/*
3932	 * o0 = vaddr
3933	 * o1 = sfmmup
3934	 * o2 = ttep
3935	 * o3 = old %pstate
3936	 * o4 = hatid
3937	 * o5 = tsbmiss
3938	 * g5 = rehash #
3939	 * g6 = hmeshift
3940	 *
3941	 * The first arg to GET_TTE is actually the tag access register,
3942	 * not just the vaddr. Since this call is for the kernel we need
3943	 * to clear any lower vaddr bits that would be interpreted as ctx bits.
3944	 */
3945	set     TAGACC_CTX_MASK, %g1
3946	andn    %o0, %g1, %o0
3947	GET_TTE(%o0, %o4, %g1, %g2, %o5, %g4, %g6, %g5, %g3,
3948		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
3949
3950kvtop_hblk_found:
3951	/*
3952	 * o0 = vaddr
3953	 * o1 = sfmmup
3954	 * o2 = ttep
3955	 * g1 = tte
3956	 * g2 = tte pa
3957	 * g3 = scratch
3958	 * o5 = tsbmiss area
3960	 */
3961	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
3962	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3963	stx %g1,[%o2]				/* put tte into *ttep */
3964	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
3965	/*
3966	 * o0 = vaddr
3967	 * o1 = sfmmup
3968	 * o2 = ttep
3969	 * g1 = pfn
3970	 */
3971	ba,pt	%xcc, 6f
3972	  mov	%g1, %o0
3973
3974kvtop_nohblk:
3975	/*
3976	 * we get here if we couldn't find a valid hblk in the hash.  We
3977	 * rehash if necessary.
3978	 */
3979	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
3980#ifdef sun4v
3981	cmp	%g5, MAX_HASHCNT
3982#else
3983	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
3984#endif /* sun4v */
3985	be,a,pn	%icc, 6f
3986	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3987	mov	%o1, %o4			/* restore hatid */
3988#ifdef sun4v
3989	add	%g5, 2, %g5
3990	cmp	%g5, 3
3991	move	%icc, MMU_PAGESHIFT4M, %g6
3992	ba,pt	%icc, 1b
3993	movne	%icc, MMU_PAGESHIFT256M, %g6
3994#else
3995	inc	%g5
3996	cmp	%g5, 2
3997	move	%icc, MMU_PAGESHIFT512K, %g6
3998	ba,pt	%icc, 1b
3999	movne	%icc, MMU_PAGESHIFT4M, %g6
4000#endif /* sun4v */
40016:
4002	retl
4003 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4004
4005tsb_suspend:
4006	/*
4007	 * o0 = vaddr
4008	 * o1 = sfmmup
4009	 * o2 = ttep
4010	 * g1 = tte
4011	 * g2 = tte pa
4012	 * g3 = tte va
4013	 * o5 = tsbmiss area (o2 still holds ttep)
4014	 */
4015	stx %g1,[%o2]				/* put tte into *ttep */
4016	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
4017	  sub	%g0, 1, %o0			/* output = PFN_INVALID */
4018	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
40198:
4020	retl
4021	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
4022
4023vatopfn_nokernel:
4024	/*
4025	 * This routine does NOT support user addresses; there is a
4026	 * C routine that does.  The only reason the C routine does not
4027	 * support kernel addresses as well is that we do va_to_pa while
4028	 * holding the hashlock.
4030	 */
4031 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4032	save	%sp, -SA(MINFRAME), %sp
4033	sethi	%hi(sfmmu_panic3), %o0
4034	call	panic
4035	 or	%o0, %lo(sfmmu_panic3), %o0
4036
4037	SET_SIZE(sfmmu_vatopfn)
4038
4039	/*
4040	 * %o0 = vaddr
4041	 * %o1 = hashno (aka szc)
4042	 *
4044	 * This routine is similar to sfmmu_vatopfn() but will only look for
4045	 * a kernel vaddr in the hash structure for the specified rehash value.
4046	 * It's just an optimization for the case when pagesize for a given
4047	 * va range is already known (e.g. large page heap) and we don't want
4048	 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
4049	 *
4050	 * Returns a valid pfn, or PFN_INVALID if the tte for the specified
4051	 * rehash # is not found, is invalid, or is suspended.
4052	 */
4053	ENTRY_NP(sfmmu_kvaszc2pfn)
4054 	/*
4055 	 * save %pstate so it can be restored on return
4056 	 */
4057 	rdpr	%pstate, %o3
4058#ifdef DEBUG
4059	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
4060#endif
4061	/*
4062	 * disable interrupts to protect the TSBMISS area
4063	 */
4064	andn    %o3, PSTATE_IE, %o5
4065	wrpr    %o5, 0, %pstate
4066
4067	CPU_TSBMISS_AREA(%g1, %o5)
4068	ldn	[%g1 + TSBMISS_KHATID], %o4
4069	sll	%o1, 1, %g6			/* g6 = 2 * hashno */
4070	add	%g6, %o1, %g6			/* g6 = 3 * hashno */
4071	add	%g6, MMU_PAGESHIFT, %g6		/* g6 = 3*hashno + MMU_PAGESHIFT */
4072	/*
4073	 * %o0 = vaddr
4074	 * %o1 = hashno
4075	 * %o3 = old %pstate
4076	 * %o4 = ksfmmup
4077	 * %g1 = tsbmiss area
4078	 * %g6 = hmeshift
4079	 */
4080
4081	/*
4082	 * The first arg to GET_TTE is actually the tag access register,
4083	 * not just the vaddr. Since this call is for the kernel we need
4084	 * to clear any lower vaddr bits that would be interpreted as ctx bits.
4085	 */
4086	srlx	%o0, MMU_PAGESHIFT, %o0
4087	sllx	%o0, MMU_PAGESHIFT, %o0
4088	GET_TTE(%o0, %o4, %g3, %g4, %g1, %o5, %g6, %o1, %g5,
4089		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
4090		kvaszc2pfn_nohblk)
4091
4092kvaszc2pfn_hblk_found:
4093	/*
4094	 * %g3 = tte
4095	 * %o0 = vaddr
4096	 */
4097	brgez,a,pn %g3, 1f			/* check if tte is invalid */
4098	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4099	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
4100	/*
4101	 * g3 = pfn
4102	 */
4103	ba,pt	%xcc, 1f
4104	  mov	%g3, %o0
4105
4106kvaszc2pfn_nohblk:
4107	mov	-1, %o0
4108
41091:
4110	retl
4111 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4112
4113	SET_SIZE(sfmmu_kvaszc2pfn)
4114
4115#endif /* lint */
4116
4117
4118
4119#if !defined(lint)
4120
4121/*
4122 * kpm lock used between trap level tsbmiss handler and kpm C level.
4123 */
4124#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
4125	mov     0xff, tmp1						;\
4126label1:									;\
4127	casa    [kpmlckp]asi, %g0, tmp1					;\
4128	brnz,pn tmp1, label1						;\
4129	mov     0xff, tmp1						;\
4130	membar  #LoadLoad
4131
4132#define KPMLOCK_EXIT(kpmlckp, asi)					\
4133	membar  #LoadStore|#StoreStore					;\
4134	sta     %g0, [kpmlckp]asi
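
/*
 * A rough C equivalent of the two macros above (a sketch assuming
 * atomic_cas_32-style semantics for the casa; the asi argument selects
 * how the lock word is addressed):
 *
 *	void kpmlock_enter(volatile uint_t *lp) {
 *		while (atomic_cas_32(lp, 0, 0xff) != 0)
 *			continue;
 *		membar_consumer();		// #LoadLoad
 *	}
 *
 *	void kpmlock_exit(volatile uint_t *lp) {
 *		membar_exit();			// #LoadStore|#StoreStore
 *		*lp = 0;
 *	}
 */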
4135
4136/*
4137 * Lookup a memseg for a given pfn and if found, return the physical
4138 * address of the corresponding struct memseg in mseg, otherwise
4139 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
4140 * tsbmp, %asi is assumed to be ASI_MEM.
4141 * This lookup is done by strictly traversing only the physical memseg
4142 * linkage. The more generic approach, to check the virtual linkage
4143 * before using the physical (used e.g. with hmehash buckets), cannot
4144 * be used here. Memory DR operations can run in parallel to this
4145 * lookup w/o any locks, and updates of the physical and virtual linkage
4146 * cannot be done atomically with respect to each other. Because physical
4147 * address zero can be a valid physical address, MSEG_NULLPTR_PA acts
4148 * as the "physical NULL" pointer.
4149 */
4150#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
4151	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
4152	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
4153	udivx	pfn, mseg, mseg						;\
4154	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
4155	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
4156	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
4157	add	tmp1, mseg, tmp1					;\
4158	ldxa	[tmp1]%asi, mseg					;\
4159	cmp	mseg, MSEG_NULLPTR_PA					;\
4160	be,pn	%xcc, label/**/1		/* if not found */	;\
4161	  nop								;\
4162	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4163	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4164	blu,pn	%xcc, label/**/1					;\
4165	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4166	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4167	bgeu,pn	%xcc, label/**/1					;\
4168	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
4169	mulx	tmp1, PAGE_SIZE, tmp1					;\
4170	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
4171	add	tmp2, tmp1, tmp1			/* pp */	;\
4172	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
4173	cmp	tmp2, pfn						;\
4174	be,pt	%xcc, label/**/_ok			/* found */	;\
4175label/**/1:								;\
4176	/* brute force lookup */					;\
4177	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
4178	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
4179label/**/2:								;\
4180	cmp	mseg, MSEG_NULLPTR_PA					;\
4181	be,pn	%xcc, label/**/_ok		/* if not found */	;\
4182	  nop								;\
4183	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4184	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4185	blu,a,pt %xcc, label/**/2					;\
4186	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4187	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4188	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4189	bgeu,a,pt %xcc, label/**/2					;\
4190	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4191label/**/_ok:
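
/*
 * In outline the macro above performs (a sketch; pages_base(),
 * pages_end(), pages(), nextpa() and page_pagenum() stand in for the
 * ldxa/lduwa physical accesses and are illustrative only):
 *
 *	mseg_pa = msegphashpa[(pfn / mhash_per_slot) &
 *	    (SFMMU_N_MEM_SLOTS - 1)];
 *	if (mseg_pa != MSEG_NULLPTR_PA &&
 *	    pfn >= pages_base(mseg_pa) && pfn < pages_end(mseg_pa) &&
 *	    page_pagenum(pages(mseg_pa) +
 *	    (pfn - pages_base(mseg_pa))) == pfn)
 *		return (mseg_pa);			// hash hit
 *	for (mseg_pa = memsegspa; mseg_pa != MSEG_NULLPTR_PA;
 *	    mseg_pa = nextpa(mseg_pa))			// brute force
 *		if (pfn >= pages_base(mseg_pa) && pfn < pages_end(mseg_pa))
 *			break;
 */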
4192
4193	/*
4194	 * kpm tsb miss handler large pages
4195	 * g1 = 8K kpm TSB entry pointer
4196	 * g2 = tag access register
4197	 * g3 = 4M kpm TSB entry pointer
4198	 */
4199	ALTENTRY(sfmmu_kpm_dtsb_miss)
4200	TT_TRACE(trace_tsbmiss)
4201
4202	CPU_INDEX(%g7, %g6)
4203	sethi	%hi(kpmtsbm_area), %g6
4204	sllx	%g7, KPMTSBM_SHIFT, %g7
4205	or	%g6, %lo(kpmtsbm_area), %g6
4206	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4207
4208	/* check enable flag */
4209	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4210	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4211	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4212	  nop
4213
4214	/* VA range check */
4215	ldx	[%g6 + KPMTSBM_VBASE], %g7
4216	cmp	%g2, %g7
4217	blu,pn	%xcc, sfmmu_tsb_miss
4218	  ldx	[%g6 + KPMTSBM_VEND], %g5
4219	cmp	%g2, %g5
4220	bgeu,pn	%xcc, sfmmu_tsb_miss
4221	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
4222
4223	/*
4224	 * check TL tsbmiss handling flag
4225	 * bump tsbmiss counter
4226	 */
4227	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4228#ifdef	DEBUG
4229	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
4230	inc	%g5
4231	brz,pn	%g3, sfmmu_kpm_exception
4232	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4233#else
4234	inc	%g5
4235	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4236#endif
4237	/*
4238	 * At this point:
4239	 *  g1 = 8K kpm TSB pointer (not used)
4240	 *  g2 = tag access register
4241	 *  g3 = clobbered
4242	 *  g6 = per-CPU kpm tsbmiss area
4243	 *  g7 = kpm_vbase
4244	 */
4245
4246	/* vaddr2pfn */
4247	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4248	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4249	srax    %g4, %g3, %g2			/* which alias range (r) */
4250	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4251	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4252
4253	/*
4254	 * Setup %asi
4255	 * mseg_pa = page_numtomemseg_nolock(pfn)
4256	 * if (mseg_pa == NULL) sfmmu_kpm_exception
4257	 * g2=pfn
4258	 */
4259	mov	ASI_MEM, %asi
4260	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
4261	cmp	%g3, MSEG_NULLPTR_PA
4262	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4263	  nop
4264
4265	/*
4266	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
4267	 * g2=pfn g3=mseg_pa
4268	 */
4269	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5	/* g5 = kpmp2pshft */
4270	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7 /* g7 = kpm_pbase */
4271	srlx	%g2, %g5, %g4			/* ptopkpmp(pfn) */
4272	sllx	%g4, %g5, %g4			/* kpmptop(): 1st small pfn */
4273	sub	%g4, %g7, %g4			/* - kpm_pbase */
4274	srlx	%g4, %g5, %g4			/* inx = ptokpmp(...) */
4275
4276	/*
4277	 * Validate inx value
4278	 * g2=pfn g3=mseg_pa g4=inx
4279	 */
4280#ifdef	DEBUG
4281	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4282	cmp	%g4, %g5			/* inx - nkpmpgs */
4283	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4284	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4285#else
4286	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4287#endif
4288	/*
4289	 * kp = &mseg_pa->kpm_pages[inx]
4290	 */
4291	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
4292	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
4293	add	%g5, %g4, %g5			/* kp */
4294
4295	/*
4296	 * KPMP_HASH(kp)
4297	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
4298	 */
4299	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4300	sub	%g7, 1, %g7			/* mask */
4301	srlx	%g5, %g1, %g1			/* x = kp >> kpmp_shift */
4302	add	%g5, %g1, %g5			/* y = kp + x */
4303	and 	%g5, %g7, %g5			/* hashinx = y & mask */
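	/* i.e. hashinx = ((kp >> kpmp_shift) + kp) & (kpmp_table_sz - 1) */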
4304
4305	/*
4306	 * Calculate physical kpm_page pointer
4307	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4308	 */
4309	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
4310	add	%g1, %g4, %g1			/* kp_pa */
4311
4312	/*
4313	 * Calculate physical hash lock address
4314	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
4315	 */
4316	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
4317	sllx	%g5, KPMHLK_SHIFT, %g5
4318	add	%g4, %g5, %g3
4319	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
4320
4321	/*
4322	 * Assemble tte
4323	 * g1=kp_pa g2=pfn g3=hlck_pa
4324	 */
4325#ifdef sun4v
4326	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4327	sllx	%g5, 32, %g5
4328	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4329	or	%g4, TTE4M, %g4
4330	or	%g5, %g4, %g5
4331#else
4332	sethi	%hi(TTE_VALID_INT), %g4
4333	mov	TTE4M, %g5
4334	sllx	%g5, TTE_SZ_SHFT_INT, %g5
4335	or	%g5, %g4, %g5			/* upper part */
4336	sllx	%g5, 32, %g5
4337	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4338	or	%g5, %g4, %g5
4339#endif
4340	sllx	%g2, MMU_PAGESHIFT, %g4
4341	or	%g5, %g4, %g5			/* tte */
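	/*
	 * i.e. tte = VALID | SZ(4M) | CP | CV | PRIV | HWWR |
	 *	(pfn << MMU_PAGESHIFT)
	 */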
4342	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4343	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4344
4345	/*
4346	 * tsb dropin
4347	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
4348	 */
4349
4350	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4351	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
4352
4353	/* use C-handler if there's no go for dropin */
4354	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
4355	cmp	%g7, -1
4356	bne,pn	%xcc, 5f		/* no go unless kp_refcntc == -1 */
4357	  nop
4358
4359#ifdef	DEBUG
4360	/* double check refcnt */
4361	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
4362	brz,pn	%g7, 5f			/* let C-handler deal with this */
4363	  nop
4364#endif
4365
4366#ifndef sun4v
4367	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4368	mov	ASI_N, %g1
4369	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4370	movnz	%icc, ASI_MEM, %g1
4371	mov	%g1, %asi
4372#endif
4373
4374	/*
4375	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4376	 * If we fail to lock the TSB entry then just load the tte into the
4377	 * TLB.
4378	 */
4379	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l1)
4380
4381	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4382	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4383locked_tsb_l1:
4384	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
4385
4386	/* KPMLOCK_EXIT(kpmlckp, asi) */
4387	KPMLOCK_EXIT(%g3, ASI_MEM)
4388
4389	/*
4390	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4391	 * point to trapstat's TSB miss return code (note that trapstat
4392	 * itself will patch the correct offset to add).
4393	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4394	 */
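	/*
	 * i.e. (sketch):
	 *	if (tl > 1 && tpc < KERNELBASE) {
	 *		tpc += patched_offset;	// TSTAT_TSBMISS_INSTR
	 *		tnpc = tpc + 4;
	 *	}
	 */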
4395	rdpr	%tl, %g7
4396	cmp	%g7, 1
4397	ble	%icc, 0f
4398	sethi	%hi(KERNELBASE), %g6
4399	rdpr	%tpc, %g7
4400	or	%g6, %lo(KERNELBASE), %g6
4401	cmp	%g7, %g6
4402	bgeu	%xcc, 0f
4403	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
4404	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4405	wrpr	%g7, %tpc
4406	add	%g7, 4, %g7
4407	wrpr	%g7, %tnpc
44080:
4409	retry
44105:
4411	/* g3=hlck_pa */
4412	KPMLOCK_EXIT(%g3, ASI_MEM)
4413	ba,pt	%icc, sfmmu_kpm_exception
4414	  nop
4415	SET_SIZE(sfmmu_kpm_dtsb_miss)
4416
4417	/*
4418	 * kpm tsbmiss handler for smallpages
4419	 * g1 = 8K kpm TSB pointer
4420	 * g2 = tag access register
4421	 * g3 = 4M kpm TSB pointer
4422	 */
4423	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
4424	TT_TRACE(trace_tsbmiss)
4425	CPU_INDEX(%g7, %g6)
4426	sethi	%hi(kpmtsbm_area), %g6
4427	sllx	%g7, KPMTSBM_SHIFT, %g7
4428	or	%g6, %lo(kpmtsbm_area), %g6
4429	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4430
4431	/* check enable flag */
4432	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4433	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4434	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4435	  nop
4436
4437	/*
4438	 * VA range check
4439	 * On fail: goto sfmmu_tsb_miss
4440	 */
4441	ldx	[%g6 + KPMTSBM_VBASE], %g7
4442	cmp	%g2, %g7
4443	blu,pn	%xcc, sfmmu_tsb_miss
4444	  ldx	[%g6 + KPMTSBM_VEND], %g5
4445	cmp	%g2, %g5
4446	bgeu,pn	%xcc, sfmmu_tsb_miss
4447	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
4448
4449	/*
4450	 * check TL tsbmiss handling flag
4451	 * bump tsbmiss counter
4452	 */
4453	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4454#ifdef	DEBUG
4455	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
4456	inc	%g5
4457	brz,pn	%g1, sfmmu_kpm_exception
4458	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4459#else
4460	inc	%g5
4461	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4462#endif
4463	/*
4464	 * At this point:
4465	 *  g1 = clobbered
4466	 *  g2 = tag access register
4467	 *  g3 = 4M kpm TSB pointer (not used)
4468	 *  g6 = per-CPU kpm tsbmiss area
4469	 *  g7 = kpm_vbase
4470	 */
4471
4472	/*
4473	 * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
4474	 * which is defined in mach_kpm.h. Any changes in that macro
4475	 * should also be ported back to this assembly code.
4476	 */
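	/*
	 * For reference, the computation below in C (derived from the
	 * assembly; see mach_kpm.h for the authoritative macro):
	 *
	 *	paddr = vaddr - kpm_vbase;
	 *	r = paddr >> kpm_size_shift;		// alias range
	 *	if (r != 0) {
	 *		v = (vaddr >> MMU_PAGESHIFT) & vac_colors_mask;
	 *		paddr -= r << kpm_size_shift;
	 *		if (r > v)
	 *			paddr += (r - v) << MMU_PAGESHIFT;
	 *		else
	 *			paddr -= r << MMU_PAGESHIFT;
	 *	}
	 */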
4477	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3	/* g3 = kpm_size_shift */
4478	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4479	srax    %g4, %g3, %g7			/* which alias range (r) */
4480	brz,pt	%g7, 2f
4481	  sethi   %hi(vac_colors_mask), %g5
4482	ld	[%g5 + %lo(vac_colors_mask)], %g5
4483
4484	srlx	%g2, MMU_PAGESHIFT, %g1		/* vaddr >> MMU_PAGESHIFT */
4485	and	%g1, %g5, %g1			/* g1 = v */
4486	sllx	%g7, %g3, %g5			/* g5 = r << kpm_size_shift */
4487	cmp	%g7, %g1			/* if (r > v) */
4488	bleu,pn %xcc, 1f
4489	  sub   %g4, %g5, %g4			/* paddr -= r << kpm_size_shift */
4490	sub	%g7, %g1, %g5			/* g5 = r - v */
4491	sllx	%g5, MMU_PAGESHIFT, %g7		/* (r-v) << MMU_PAGESHIFT */
4492	add	%g4, %g7, %g4			/* paddr += (r-v)<<MMU_PAGESHIFT */
4493	ba	2f
4494	  nop
44951:
4496	sllx	%g7, MMU_PAGESHIFT, %g5		/* else */
4497	sub	%g4, %g5, %g4			/* paddr -= r << MMU_PAGESHIFT */
4498
4499	/*
4500	 * paddr2pfn
4501	 *  g1 = vcolor (not used)
4502	 *  g2 = tag access register
4503	 *  g3 = clobbered
4504	 *  g4 = paddr
4505	 *  g5 = clobbered
4506	 *  g6 = per-CPU kpm tsbmiss area
4507	 *  g7 = clobbered
4508	 */
45092:
4510	srlx	%g4, MMU_PAGESHIFT, %g2		/* g2 = pfn */
4511
4512	/*
4513	 * Setup %asi
4514	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
4515	 * if (mseg not found) sfmmu_kpm_exception
4516	 * g2=pfn g6=per-CPU kpm tsbmiss area
4517	 * g4 g5 g7 for scratch use.
4518	 */
4519	mov	ASI_MEM, %asi
4520	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
4521	cmp	%g3, MSEG_NULLPTR_PA
4522	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4523	  nop
4524
4525	/*
4526	 * inx = pfn - mseg_pa->kpm_pbase
4527	 * g2=pfn  g3=mseg_pa  g6=per-CPU kpm tsbmiss area
4528	 */
4529	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4530	sub	%g2, %g7, %g4
4531
4532#ifdef	DEBUG
4533	/*
4534	 * Validate inx value
4535	 * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
4536	 */
4537	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4538	cmp	%g4, %g5			/* inx - nkpmpgs */
4539	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4540	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4541#else
4542	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4543#endif
4544	/* ksp = &mseg_pa->kpm_spages[inx] */
4545	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
4546	add	%g5, %g4, %g5			/* ksp */
4547
4548	/*
4549	 * KPMP_SHASH(kp)
4550	 * g2=pfn g3=mseg_pa g4=inx g5=ksp
4551	 * g6=per-CPU kpm tsbmiss area  g7=kpmp_stable_sz
4552	 */
4553	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4554	sub	%g7, 1, %g7			/* mask */
4555	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
4556	add	%g5, %g1, %g5			/* y = ksp + x */
4557	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4558
4559	/*
4560	 * Calculate physical kpm_spage pointer
4561	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4562	 * g6=per-CPU kpm tsbmiss area
4563	 */
4564	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
4565	add	%g1, %g4, %g1			/* ksp_pa */
4566
4567	/*
4568	 * Calculate physical hash lock address.
4569	 * Note: Changes in kpm_shlk_t must be reflected here.
4570	 * g1=ksp_pa g2=pfn g5=hashinx
4571	 * g6=per-CPU kpm tsbmiss area
4572	 */
4573	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
4574	sllx	%g5, KPMSHLK_SHIFT, %g5
4575	add	%g4, %g5, %g3			/* hlck_pa */
4576
4577	/*
4578	 * Assemble non-cacheable tte initially
4579	 * g1=ksp_pa g2=pfn g3=hlck_pa
4580	 * g6=per-CPU kpm tsbmiss area
4581	 */
4582	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4583	sllx	%g5, 32, %g5
4584	mov	(TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4585	or	%g5, %g4, %g5
4586	sllx	%g2, MMU_PAGESHIFT, %g4
4587	or	%g5, %g4, %g5			/* tte */
4588	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4589	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4590
4591	/*
4592	 * tsb dropin
4593	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
4594	 * g6=per-CPU kpm tsbmiss area  g7=scratch register
4595	 */
4596
4597	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4598	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
4599
4600	/* use C-handler if there's no go for dropin */
4601	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7	/* kp_mapped */
4602	andcc	%g7, KPM_MAPPED_GO, %g0			/* go or no go ? */
4603	bz,pt	%icc, 5f				/* no go */
4604	  nop
4605	and	%g7, KPM_MAPPED_MASK, %g7		/* go */
4606	cmp	%g7, KPM_MAPPEDS			/* cacheable ? */
4607	be,a,pn	%xcc, 3f
4608	  or	%g5, TTE_CV_INT, %g5			/* cacheable */
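	/*
	 * i.e. kp_mapped had KPM_MAPPED_GO set; the tte just built stays
	 * non-cacheable unless (kp_mapped & KPM_MAPPED_MASK) == KPM_MAPPEDS.
	 */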
46093:
4610#ifndef sun4v
4611	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4612	mov	ASI_N, %g1
4613	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4614	movnz	%icc, ASI_MEM, %g1
4615	mov	%g1, %asi
4616#endif
4617
4618	/*
4619	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4620	 * If we fail to lock the TSB entry then just load the tte into the
4621	 * TLB.
4622	 */
4623	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l2)
4624
4625	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4626	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4627locked_tsb_l2:
4628	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
4629
4630	/* KPMLOCK_EXIT(kpmlckp, asi) */
4631	KPMLOCK_EXIT(%g3, ASI_MEM)
4632
4633	/*
4634	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4635	 * point to trapstat's TSB miss return code (note that trapstat
4636	 * itself will patch the correct offset to add).
4637	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4638	 */
4639	rdpr	%tl, %g7
4640	cmp	%g7, 1
4641	ble	%icc, 0f
4642	sethi	%hi(KERNELBASE), %g6
4643	rdpr	%tpc, %g7
4644	or	%g6, %lo(KERNELBASE), %g6
4645	cmp	%g7, %g6
4646	bgeu	%xcc, 0f
4647	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4648	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4649	wrpr	%g7, %tpc
4650	add	%g7, 4, %g7
4651	wrpr	%g7, %tnpc
46520:
4653	retry
46545:
4655	/* g3=hlck_pa */
4656	KPMLOCK_EXIT(%g3, ASI_MEM)
4657	ba,pt	%icc, sfmmu_kpm_exception
4658	  nop
4659	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4660
4661#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4662#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4663#endif
4664
4665#endif /* lint */
4666
4667#ifdef	lint
4668/*
4669 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4670 * Called from C-level, sets/clears "go" indication for trap level handler.
4671 * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
4672 * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
4673 * Assumes khl_mutex is held when called from C-level.
4674 */
4675/* ARGSUSED */
4676void
4677sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4678{
4679}
4680
4681/*
4682 * For kpm_smallpages: stores val to the byte at address "mapped" within
4683 * low level lock brackets; the old value is returned.
4684 * Called from C-level.
4685 */
4686/* ARGSUSED */
4687int
4688sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
4689{
4690	return (0);
4691}
4692
4693#else /* lint */
4694
4695	.seg	".data"
4696sfmmu_kpm_tsbmtl_panic:
4697	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
4698	.byte	0
4699sfmmu_kpm_stsbmtl_panic:
4700	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
4701	.byte	0
4702	.align	4
4703	.seg	".text"
4704
4705	ENTRY_NP(sfmmu_kpm_tsbmtl)
4706	rdpr	%pstate, %o3
4707	/*
4708	 * %o0 = &kp_refcntc
4709	 * %o1 = &khl_lock
4710	 * %o2 = 0/1 (off/on)
4711	 * %o3 = pstate save
4712	 */
4713#ifdef DEBUG
4714	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4715	bnz,pt %icc, 1f				/* disabled, panic	 */
4716	  nop
4717	save	%sp, -SA(MINFRAME), %sp
4718	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
4719	call	panic
4720	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4721	ret
4722	restore
47231:
4724#endif /* DEBUG */
4725	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4726
4727	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4728	mov	-1, %o5
4729	brz,a	%o2, 2f
4730	  mov	0, %o5
47312:
4732	sth	%o5, [%o0]
4733	KPMLOCK_EXIT(%o1, ASI_N)
4734
4735	retl
4736	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4737	SET_SIZE(sfmmu_kpm_tsbmtl)
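
/*
 * A C-level sketch of sfmmu_kpm_tsbmtl (interrupts are disabled around
 * the lock as above; kpmlock_enter/kpmlock_exit as sketched earlier):
 *
 *	void sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
 *	{
 *		kpmlock_enter(khl_lock);
 *		*kp_refcntc = (cmd != 0) ? -1 : 0;	// -1 == "go"
 *		kpmlock_exit(khl_lock);
 *	}
 */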
4738
4739	ENTRY_NP(sfmmu_kpm_stsbmtl)
4740	rdpr	%pstate, %o3
4741	/*
4742	 * %o0 = &mapped
4743	 * %o1 = &kshl_lock
4744	 * %o2 = val
4745	 * %o3 = pstate save
4746	 */
4747#ifdef DEBUG
4748	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4749	bnz,pt %icc, 1f				/* disabled, panic	 */
4750	  nop
4751	save	%sp, -SA(MINFRAME), %sp
4752	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
4753	call	panic
4754	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4755	ret
4756	restore
47571:
4758#endif /* DEBUG */
4759	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4760
4761	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4762	ldsb	[%o0], %o5
4763	stb	%o2, [%o0]
4764	KPMLOCK_EXIT(%o1, ASI_N)
4765
4766	and	%o5, KPM_MAPPED_MASK, %o0	/* return old val */
4767	retl
4768	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4769	SET_SIZE(sfmmu_kpm_stsbmtl)
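
/*
 * Likewise for sfmmu_kpm_stsbmtl (sketch):
 *
 *	int sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
 *	{
 *		int old;
 *
 *		kpmlock_enter(kshl_lock);
 *		old = *(char *)mapped;
 *		*mapped = val;
 *		kpmlock_exit(kshl_lock);
 *		return (old & KPM_MAPPED_MASK);
 *	}
 */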
4770
4771#endif /* lint */
4772
4773#ifndef lint
4774#ifdef sun4v
4775	/*
4776	 * User/kernel data miss w/ multiple TSBs
4777	 * The first probe covers 8K, 64K, and 512K page sizes,
4778	 * because 64K and 512K mappings are replicated off the 8K
4779	 * pointer.  The second probe covers the 4M page size only.
4780	 *
4781	 * MMU fault area contains miss address and context.
4782	 */
4783	ALTENTRY(sfmmu_slow_dmmu_miss)
4784	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
4785
4786slow_miss_common:
4787	/*
4788	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4789	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
4790	 */
4791	brnz,pt	%g3, 8f			! check for user context
4792	  nop
4793
4794	/*
4795	 * Kernel miss
4796	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
4797	 * branch to sfmmu_tsb_miss_tt to handle it.
4798	 */
4799	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4800sfmmu_dslow_patch_ktsb_base:
4801	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
4802sfmmu_dslow_patch_ktsb_szcode:
4803	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
4804
4805	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
4806	! %g1 = First TSB entry pointer, as TSB miss handler expects
4807
4808	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4809sfmmu_dslow_patch_ktsb4m_base:
4810	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
4811sfmmu_dslow_patch_ktsb4m_szcode:
4812	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
4813
4814	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
4815	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
4816	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4817	.empty
4818
48198:
4820	/*
4821	 * User miss
4822	 * Get first TSB pointer in %g1
4823	 * Get second TSB pointer (or NULL if no second TSB) in %g3
4824	 * Branch to sfmmu_tsb_miss_tt to handle it
4825	 */
4826	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
4827	/* %g1 = first TSB entry ptr now, %g2 preserved */
4828
4829	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
4830	brlz,pt %g3, sfmmu_tsb_miss_tt		/* done if no 2nd TSB */
4831	  nop
4832
4833	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
4834	/* %g3 = second TSB entry ptr now, %g2 preserved */
48359:
4836	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4837	.empty
4838	SET_SIZE(sfmmu_slow_dmmu_miss)
4839
4840
4841	/*
4842	 * User/kernel instruction miss w/ multiple TSBs
4843	 * The first probe covers 8K, 64K, and 512K page sizes,
4844	 * because 64K and 512K mappings are replicated off the 8K
4845	 * pointer.  The second probe covers the 4M page size only.
4846	 *
4847	 * MMU fault area contains miss address and context.
4848	 */
4849	ALTENTRY(sfmmu_slow_immu_miss)
4850	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4851	ba,a,pt	%xcc, slow_miss_common
4852	SET_SIZE(sfmmu_slow_immu_miss)
4853
4854#endif /* sun4v */
4855#endif	/* lint */
4856
4857#ifndef lint
4858
4859/*
4860 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4861 */
4862	.seg	".data"
4863	.align	64
4864	.global tsbmiss_area
4865tsbmiss_area:
4866	.skip	(TSBMISS_SIZE * NCPU)
4867
4868	.align	64
4869	.global kpmtsbm_area
4870kpmtsbm_area:
4871	.skip	(KPMTSBM_SIZE * NCPU)
4872#endif	/* lint */
4873