/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#include "assym.h"

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBE_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc					;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */

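/*
 * For illustration only, a hedged C sketch of the index computation
 * GET_TSBE_POINTER performs; the function name and prototype below are
 * hypothetical, not part of this file:
 *
 *	struct tsbe *
 *	get_tsbe_pointer(struct tsbe *tsbbase, uint64_t tagacc,
 *	    int vpshift, int szc)
 *	{
 *		uint64_t nentries = TSB_ENTRIES(0) << szc;
 *		uint64_t virtpage = tagacc >> vpshift;
 *
 *		return (&tsbbase[virtpage & (nentries - 1)]);
 *	}
 *
 * i.e. a direct-mapped lookup: the entry index is simply the virtual
 * page number modulo the (power-of-two) number of TSB entries.
 */
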
/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)

/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#endif /* UTSB_PHYS */
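
/*
 * A hedged C sketch of the try-lock protocol TSB_LOCK_ENTRY implements
 * with casa; the function below is illustrative only, with
 * atomic_cas_32() and membar_producer() standing in for the casa and
 * membar instructions:
 *
 *	int
 *	tsb_lock_entry(volatile uint32_t *taghi)
 *	{
 *		uint32_t old = *taghi;
 *
 *		if (old == TSBTAG_LOCKED)
 *			return (0);		// already locked; give up
 *		if (atomic_cas_32(taghi, old, TSBTAG_LOCKED) != old)
 *			return (0);		// lost the race; give up
 *		membar_producer();		// membar #StoreStore
 *		return (1);			// lock held
 *	}
 *
 * A failed caller simply branches to "label" rather than spinning; the
 * invalidation path (TSB_INVALIDATE below) is what waits for the lock
 * to be released.
 */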

/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar #StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar #StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte: TSB invalidate will spin till the entry is unlocked.	;\
	 * Note, we always invalidate the hash table before we unload	;\
	 * the TSB.							;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte: TSB invalidate will spin till the entry is unlocked.	;\
	 * Note, we always invalidate the hash table before we unload	;\
	 * the TSB.							;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte: TSB invalidate will spin till the entry is unlocked.	;\
	 * Note, we always invalidate the hash table before we unload	;\
	 * the TSB.  OR the 4M pfn offset into the TTE and set the	;\
	 * exec_perm bit to 0 and the exec_synth bit to 1.		;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1						;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte				;\
	or	tte, TTE_E_SYNTH_INT, tte				;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label##f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn

/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits.					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 *	 about this earlier to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label##1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label##1		/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label##2		/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */ ;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label##1		/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label##2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label##1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label##1		/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label##2		/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label##1		/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label##2:

#endif /* UTSB_PHYS */
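
/*
 * A hedged C sketch of the loop TSB_INVALIDATE implements; names are
 * illustrative.  The 32-bit tag-high word sits at offset TSBE_TAG == 0,
 * which is why the macro can address the tsbe pointer directly.
 *
 *	void
 *	tsb_invalidate(volatile uint64_t *tsbep, uint64_t tag)
 *	{
 *		volatile uint32_t *taghi = (volatile uint32_t *)tsbep;
 *
 *		for (;;) {
 *			uint32_t old = *taghi;
 *
 *			if (old == TSBTAG_LOCKED)
 *				continue;	// spin while locked
 *			if (*tsbep != tag)
 *				return;		// tag mismatch: nothing to do
 *			if (atomic_cas_32(taghi, old, TSBTAG_INVALID) == old)
 *				return;		// invalidated it
 *			// cas failed: another cpu won; start over
 *		}
 *	}
 */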

#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */

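/*
 * A hedged sketch of how a 64-bit value decomposes into the four
 * immediates that RUNTIME_PATCH_SETX (and sfmmu_fixup_setx below)
 * traffic in:
 *
 *	uint32_t hh = val >> 42;		// %hh: bits [63:42]
 *	uint32_t hm = (val >> 32) & 0x3ff;	// %hm: bits [41:32]
 *	uint32_t lm = (val >> 10) & 0x3fffff;	// %lm: bits [31:10]
 *	uint32_t lo = val & 0x3ff;		// %lo: bits [9:0]
 *
 * The patched sequence then reconstructs
 *
 *	val == ((((uint64_t)hh << 10) | hm) << 32) |
 *	    ((uint64_t)lm << 10) | lo
 */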

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"

	.global	sfmmu_panic6
sfmmu_panic6:
	.asciz	"sfmmu_asm: interrupts not disabled"

	.global	sfmmu_panic7
sfmmu_panic7:
	.asciz	"sfmmu_asm: kernel as"

	.global	sfmmu_panic8
sfmmu_panic8:
	.asciz	"sfmmu_asm: gnum is zero"

	.global	sfmmu_panic9
sfmmu_panic9:
	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"

	.global	sfmmu_panic10
sfmmu_panic10:
	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"

	.global	sfmmu_panic11
sfmmu_panic11:
	.asciz	"sfmmu_asm: ktsb_phys must not be 0 on a sun4v platform"

	ENTRY(sfmmu_disable_intrs)
	rdpr	%pstate, %o0
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
#endif /* DEBUG */
	retl
	  wrpr	%o0, PSTATE_IE, %pstate
	SET_SIZE(sfmmu_disable_intrs)

	ENTRY(sfmmu_enable_intrs)
	retl
	  wrpr	%g0, %o0, %pstate
	SET_SIZE(sfmmu_enable_intrs)

/*
 * This routine is called both by resume() and sfmmu_get_ctx() to
 * allocate a new context for the process on an MMU.
 * If allocflag == 1, then alloc ctx when HAT mmu cnum == INVALID.
 * If allocflag == 0, then do not alloc ctx if HAT mmu cnum == INVALID, which
 * is the case when sfmmu_alloc_ctx is called from resume().
 *
 * The caller must disable interrupts before entering this routine.
 * To reduce ctx switch overhead, the code contains both 'fast path' and
 * 'slow path' code. The fast path code covers the common case, where only
 * a quick check is needed and the real ctx allocation is not required.
 * It can be done without holding the per-process (PP) lock.
 * The 'slow path' code must be protected by the PP lock and performs ctx
 * allocation.
 * The hardware context register and the HAT mmu cnum are updated accordingly.
 *
 * %o0 - sfmmup
 * %o1 - allocflag
 * %o2 - CPU
 * %o3 - sfmmu private/shared flag
 *
 * ret - 0: no ctx is allocated
 *       1: a ctx is allocated
 */
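/*
 * A hedged C outline of the fast path below; field and helper names
 * are illustrative, not the real assym offsets:
 *
 *	uint64_t gc = sfmmup->sfmmu_ctxs[mmuid];	// one 64-bit load
 *
 *	if (cnum(gc) == INVALID_CONTEXT) {
 *		if (allocflag == 0)
 *			return (0);	// resume(): just program the ctx reg
 *		// else fall into the locked slow path to allocate a cnum
 *	} else if (gnum(gc) == mmu_ctxp->gnum) {
 *		program_secondary_ctx(cnum(gc));
 *		return (1);		// ctx is still good
 *	}
 *	// valid cnum but stale gnum: take the PP lock, slow path
 */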
	ENTRY_NP(sfmmu_alloc_ctx)

#ifdef DEBUG
	sethi	%hi(ksfmmup), %g1
	ldx	[%g1 + %lo(ksfmmup)], %g1
	cmp	%g1, %o0
	bne,pt	%xcc, 0f
	  nop

	sethi	%hi(panicstr), %g1		! if kernel as, panic
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 7f
	  nop

	sethi	%hi(sfmmu_panic7), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic7), %o0

7:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0

0:
	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
#endif /* DEBUG */

	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1

	! load global mmu_ctxp info
	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr

#ifdef sun4v
	/* During suspend on sun4v, context domains can be temporarily removed */
	brz,a,pn	%o3, 0f
	  nop
#endif

	lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index

	! load global mmu_ctxp gnum
	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum

#ifdef DEBUG
	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
	bne,pt	%xcc, 3f
	  nop

	sethi	%hi(panicstr), %g1	! test if panicstr is already set
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 1f
	  nop

	sethi	%hi(sfmmu_panic8), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic8), %o0
1:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0
3:
#endif

	! load HAT sfmmu_ctxs[mmuid] gnum, cnum

	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS

	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 *
	 * Fast path code, do a quick check.
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 8f		! allocflag == 0, skip ctx allocation, bail
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 2f
	  nop
#ifdef sun4v
0:
	set	INVALID_CONTEXT, %o1
	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  mov	%g0, %g4		! %g4 = ret = 0
#endif
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 8f			! gnum unchanged, go to done
	  mov	%g6, %o1

2:
	/*
	 * Grab the per-process (PP) sfmmu_ctx_lock spinlock,
	 * followed by the 'slow path' code.
	 */
	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
3:
	brz	%g3, 5f
	  nop
4:
	brnz,a,pt	%g3, 4b				! spin if lock is 1
	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
	ba	%xcc, 3b				! retry the lock
	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = PP lock

5:
	membar	#LoadLoad
	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 6f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 2f			! gnum unchanged, go to done
	  mov	%g6, %o1

	ba,pt	%icc, 6f
	  nop
2:
	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
6:
	/*
	 * We get here if we do not have a valid context, or
	 * the HAT gnum does not match the global gnum. We hold
	 * the sfmmu_ctx_lock spinlock. Allocate that context.
	 *
	 * %o3 = mmu_ctxp
	 */
	add	%o3, MMU_CTX_CNUM, %g3
	ld	[%o3 + MMU_CTX_NCTXS], %g4

	/*
	 * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
	 * %g3 = mmu cnum address
	 * %g4 = mmu nctxs
	 *
	 * %o0 = sfmmup
	 * %o1 = mmu current cnum value (used as new cnum)
	 * %o4 = mmu gnum
	 *
	 * %o5 = scratch
	 */
	ld	[%g3], %o1
0:
	cmp	%o1, %g4
	bl,a,pt %icc, 1f
	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1

	/*
	 * cnum reached max; bail so that wraparound can be performed later.
	 */
	set	INVALID_CONTEXT, %o1
	mov	%g0, %g4		! %g4 = ret = 0

	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
1:
	! %g3 = addr of mmu_ctxp->cnum
	! %o5 = mmu_ctxp->cnum + 1
	cas	[%g3], %o1, %o5
	cmp	%o1, %o5
	bne,a,pn %xcc, 0b	! cas failed
	  ld	[%g3], %o1

#ifdef DEBUG
	set	MAX_SFMMU_CTX_VAL, %o5
	cmp	%o1, %o5
	ble,pt %icc, 2f
	  nop

	sethi	%hi(sfmmu_panic9), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic9), %o0
2:
#endif
	! update hat gnum and cnum
	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
	or	%o4, %o1, %o4
	stx	%o4, [%g2 + SFMMU_CTXS]

	membar	#LoadStore|#StoreStore
	clrb	[%o0 + SFMMU_CTX_LOCK]

	mov	1, %g4			! %g4 = ret = 1
8:
	/*
	 * program the secondary context register
	 *
	 * %o1 = cnum
	 * %g1 = sfmmu private/shared flag (0:private, 1:shared)
	 */

	/*
	 * When we come here and the context is invalid, we want to set both
	 * the private and shared ctx regs to INVALID. In order to
	 * do so, we set the sfmmu priv/shared flag to 'private' regardless,
	 * so that the private ctx reg will be set to invalid.
	 * Note that on sun4v, values written to the private context register
	 * are automatically written to the corresponding shared context
	 * register as well. On sun4u, SET_SECCTX() will invalidate the shared
	 * context register when it sets a private secondary context register.
	 */

	cmp	%o1, INVALID_CONTEXT
	be,a,pn	%icc, 9f
	  clr	%g1
9:

#ifdef	sun4u
	ldub	[%o0 + SFMMU_CEXT], %o2
	sll	%o2, CTXREG_EXT_SHIFT, %o2
	or	%o1, %o2, %o1
#endif /* sun4u */

	SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)

	retl
	  mov	%g4, %o0			! %o0 = ret

	SET_SIZE(sfmmu_alloc_ctx)

	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
2:
	ldx	[%o1], %g2			/* modified */
	cmp	%g2, %g3			/* is modified = current? */
	be,a,pt	%xcc, 1f			/* yes, don't write */
	stx	%g3, [%o0]			/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f			/* cas succeeded - return */
	  nop
	ldx	[%o2], %g3			/* new current */
	stx	%g3, [%o0]			/* save as new original */
	ba,pt	%xcc, 2b
	  mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)
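
/*
 * A hedged C equivalent of sfmmu_modifytte above; argument names are
 * illustrative, and atomic_cas_64() stands in for casx.  Keep retrying
 * the swap from *origp to *modp, resampling the current tte on each
 * failure, until either the cas succeeds or the tte already holds the
 * modified value.
 *
 *	void
 *	sfmmu_modifytte(uint64_t *origp, uint64_t *modp,
 *	    volatile uint64_t *ttep)
 *	{
 *		uint64_t cur = *ttep;
 *		uint64_t orig = *origp;
 *
 *		for (;;) {
 *			if (*modp == cur) {
 *				*origp = cur;	// nothing to write
 *				break;
 *			}
 *			if (atomic_cas_64(ttep, orig, *modp) == orig)
 *				break;		// casx succeeded
 *			cur = *ttep;		// lost a race: resample,
 *			*origp = orig = cur;	// update "original", retry
 *		}
 *	}
 *
 * The trailing membar #StoreLoad orders the tte update ahead of any
 * subsequent loads by this cpu.
 */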

	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2			/* modified */
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
	cmp	%g3, %g2			/* is modified = current? */
	be,a,pn %xcc, 1f			/* yes, don't write */
	mov	0, %o1				/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1			/* cas failed. */
	move	%xcc, 1, %o1			/* cas succeeded. */
1:
	stx	%g2, [%o0]			/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

/*
 * Other sfmmu primitives
 */


#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 31:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as follows:
	 *
	 * sethi   %hh(RUNTIME_PATCH), tmp
	 * sethi   %lm(RUNTIME_PATCH), dest
	 * or      tmp, %hm(RUNTIME_PATCH), tmp
	 * or      dest, %lo(RUNTIME_PATCH), dest
	 * sllx    tmp, 32, tmp
	 * nop
	 * or      tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)
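
/*
 * All of the sfmmu_fixup_* routines use the same hot-patching idiom,
 * sketched here in hedged C (flush_instr() is a stand-in for the
 * flush instruction, not a real kernel interface): read the
 * instruction word, OR the new field into its immediate (which was
 * assembled as RUNTIME_PATCH, i.e. zero), store it back, and flush
 * the I$ line so the cpu sees the patched instruction.
 *
 *	uint32_t instr = *instrp;
 *	instr |= field & mask;		// e.g. mask 0x3fffff for imm22
 *	*instrp = instr;
 *	flush_instr(instrp);
 */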

	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to setup, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	  srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	  or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	  add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)
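
/*
 * A hedged C sketch of the instruction-recognition test in the loop
 * above: a load-alternate is a format-3 instruction (bits [31:30] ==
 * 0b11) and ldda has op3 == 0x13 (bits [24:19]); its imm_asi field
 * occupies bits [12:5], which is why the caller pre-shifts the ASI
 * left by 5.
 *
 *	int
 *	is_ldda(uint32_t instr)
 *	{
 *		return ((instr >> 30) == 3 &&
 *		    ((instr >> 19) & 0x3f) == 0x13);
 *	}
 */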

	/*
	 * Patch the immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	mov	ASI_QUAD_LDD_PHYS, %o3		! set QUAD_LDD_PHYS by default

#ifdef sun4v

	/*
	 * Check ktsb_phys. It must be non-zero for sun4v; panic if not.
	 */

	brnz,pt %o0, do_patch
	nop

	sethi	%hi(sfmmu_panic11), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic11), %o0
do_patch:

#else /* sun4v */
	/*
	 * Some non-sun4v platforms deploy a virtual ktsb (ktsb_phys==0).
	 * Note that ASI_NQUAD_LD is not defined/used for sun4v.
	 */
	movrz	%o0, ASI_NQUAD_LD, %o3

#endif /* sun4v */

	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	  or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	  or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	  or	%o0, %lo(iktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
	  or	%o0, %lo(iktsb4m), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)


	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	  nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(iktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb4m), %o0

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif /* !sun4v */

	ldx	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(iktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(iktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)

	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)

	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	flush	%o0
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)

	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	lduw	[%o1], %o3
	subcc	%o4, %o3, %o4
	bz,pt	%icc, 1f
	  /* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)

	ENTRY_NP(sfmmu_patch_shctx)
#ifdef sun4u
	retl
	  nop
#else /* sun4u */
	set	sfmmu_shctx_cpu_mondo_patch, %o0
	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp	%g5
	st	%o1, [%o0]
	flush	%o0
	MAKE_NOP_INSTR(%o1)
	add	%o0, I_SIZE, %o0	! next instr
	st	%o1, [%o0]
	flush	%o0

	set	sfmmu_shctx_user_rtt_patch, %o0
	st	%o1, [%o0]		! nop 1st instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 2nd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 3rd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 4th instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 5th instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 6th instruction
	retl
	flush	%o0
#endif /* sun4u */
	SET_SIZE(sfmmu_patch_shctx)

	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, locked_tsb_l8)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_load_tsbe)

	/*
	 * Flush a given TSB entry if the tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_unload_tsbe)

	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since the kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, locked_tsb_l9)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	  membar #StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_load_tsb)

	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg. Locking is required since
	 * the kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	  membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_unload_tsb)


	ENTRY_NP(sfmmu_ttetopfn)
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0
	SET_SIZE(sfmmu_ttetopfn)

/*
 * These macros are used to update global sfmmu hme hash statistics
 * in perf critical paths. They are only enabled in debug kernels or
 * if SFMMU_STAT_GATHER is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_LINKS, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]


#else /* DEBUG || SFMMU_STAT_GATHER */

#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#endif  /* DEBUG || SFMMU_STAT_GATHER */

/*
 * This macro is used to update global sfmmu kstats in non-perf-critical
 * areas, so it is enabled all the time.
 */
#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
	sethi	%hi(sfmmu_global_stat), tmp1				;\
	add	tmp1, statname, tmp1					;\
	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
	inc	tmp2							;\
	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]

/*
 * These macros are used to update per cpu stats in non-perf-critical
 * areas, so they are enabled all the time.
 */
#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
	ld	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	st	tmp1, [tsbarea + stat]

/*
 * These macros are used to update per cpu stats in non-perf-critical
 * areas, so they are enabled all the time.
 */
#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
	lduh	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	stuh	tmp1, [tsbarea + stat]

#if defined(KPM_TLBMISS_STATS_GATHER)
	/*
	 * Count kpm dtlb misses separately to allow a different
	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
	 */
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
	brgez	tagacc, label	/* KPM VA? */				;\
	nop								;\
	CPU_INDEX(tmp1, tsbma)						;\
	sethi	%hi(kpmtsbm_area), tsbma				;\
	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
	/* VA range check */						;\
	ldx	[tsbma + KPMTSBM_VBASE], val				;\
	cmp	tagacc, val						;\
	blu,pn	%xcc, label						;\
	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
	cmp	tagacc, tmp1						;\
	bgeu,pn	%xcc, label						;\
	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
	inc	val							;\
	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
label:
#else
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
#endif	/* KPM_TLBMISS_STATS_GATHER */

#ifdef	PTL1_PANIC_DEBUG
	.seg	".data"
	.global	test_ptl1_panic
test_ptl1_panic:
	.word	0
	.align	8

	.seg	".text"
	.align	4
#endif	/* PTL1_PANIC_DEBUG */

	/*
	 * The following routines are jumped to from the mmu trap handlers to do
	 * the setting up to call systrap.  They are separate routines instead
	 * of being part of the handlers because the handlers would exceed 32
	 * instructions, and since this is part of the slow path the jump cost
	 * is irrelevant.
	 */

	ENTRY_NP(sfmmu_pagefault)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */

#ifdef  PTL1_PANIC_DEBUG
	/* check if we want to test the tl1 panic */
	sethi	%hi(test_ptl1_panic), %g4
	ld	[%g4 + %lo(test_ptl1_panic)], %g1
	st	%g0, [%g4 + %lo(test_ptl1_panic)]
	cmp	%g1, %g0
	bne,a,pn %icc, ptl1_panic
	  or	%g0, PTL1_BAD_DEBUG, %g1
#endif	/* PTL1_PANIC_DEBUG */
1:
	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
	/*
	 * g2 = tag access reg
	 * g3.l = type
	 * g3.h = 0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1
2:
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	SET_SIZE(sfmmu_pagefault)

	ENTRY_NP(sfmmu_mmu_trap)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1:
	/*
	 * g2 = tag access reg
	 * g3 = type
	 */
	sethi	%hi(sfmmu_tsbmiss_exception), %g1
	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_mmu_trap)

	ENTRY_NP(sfmmu_suspend_tl)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3
	movne	%icc, T_DATA_PROT, %g3
1:
	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
	ba,pt	%xcc, sys_trap
	  mov	PIL_15, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_suspend_tl)

	/*
	 * No %g registers in use at this point.
	 */
	ENTRY_NP(sfmmu_window_trap)
	rdpr	%tpc, %g1
#ifdef sun4v
#ifdef DEBUG
	/* We assume previous %gl was 1 */
	rdpr	%tstate, %g4
	srlx	%g4, TSTATE_GL_SHIFT, %g4
	and	%g4, TSTATE_GL_MASK, %g4
	cmp	%g4, 1
	bne,a,pn %icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
#endif /* DEBUG */
	/* user miss at tl>1. better be the window handler or user_rtt */
	/* in user_rtt? */
	set	rtt_fill_start, %g4
	cmp	%g1, %g4
	blu,pn %xcc, 6f
	 .empty
	set	rtt_fill_end, %g4
	cmp	%g1, %g4
	bgeu,pn %xcc, 6f
	 nop
	set	fault_rtt_fn1, %g1
	wrpr	%g0, %g1, %tnpc
	ba,a	7f
6:
	! must save this trap level before descending trap stack
	! no need to save %tnpc, either overwritten or discarded
	! already got it: rdpr	%tpc, %g1
	rdpr	%tstate, %g6
	rdpr	%tt, %g7
	! trap level saved, go get underlying trap type
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
	! restore saved trap level
	wrpr	%g1, %tpc
	wrpr	%g6, %tstate
	wrpr	%g7, %tt
#else /* sun4v */
	/* user miss at tl>1. better be the window handler */
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
#endif /* sun4v */
	and	%g2, WTRAP_TTMASK, %g4
	cmp	%g4, WTRAP_TYPE
	bne,pn	%xcc, 1f
	 nop
	/* tpc should be in the trap table */
	set	trap_table, %g4
	cmp	%g1, %g4
	blt,pn %xcc, 1f
	 .empty
	set	etrap_table, %g4
	cmp	%g1, %g4
	bge,pn %xcc, 1f
	 .empty
	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
	add	%g1, WTRAP_FAULTOFF, %g1
	wrpr	%g0, %g1, %tnpc
7:
	/*
	 * some wbuf handlers will call systrap to resolve the fault
	 * we pass the trap type so they figure out the correct parameters.
	 * g5 = trap type, g6 = tag access reg
	 */

	/*
	 * only use g5, g6, g7 registers after we have switched to alternate
	 * globals.
	 */
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
	rdpr	%tt, %g7
	cmp	%g7, FAST_IMMU_MISS_TT
	be,a,pn	%icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
	cmp	%g7, T_INSTR_MMU_MISS
	be,a,pn	%icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
	mov	T_DATA_PROT, %g5
	cmp	%g7, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g5
	cmp	%g7, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g5
	! XXXQ AGS re-check out this one
	done
1:
	CPU_PADDR(%g1, %g4)
	add	%g1, CPU_TL1_HDLR, %g1
	lda	[%g1]ASI_MEM, %g4
	brnz,a,pt %g4, sfmmu_mmu_trap
	  sta	%g0, [%g1]ASI_MEM
	ba,pt	%icc, ptl1_panic
	  mov	PTL1_BAD_TRAP, %g1
	SET_SIZE(sfmmu_window_trap)

	ENTRY_NP(sfmmu_kpm_exception)
	/*
	 * We have accessed an unmapped segkpm address or a legal segkpm
	 * address which is involved in a VAC alias conflict prevention.
	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
	 * set. If it is, we will instead note that a fault has occurred
	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
	 * a "retry"). This will step over the faulting instruction.
	 * Note that this means that a legal segkpm address involved in
	 * a VAC alias conflict prevention (a rare case to begin with)
	 * cannot be used in DTrace.
	 */
	CPU_INDEX(%g1, %g2)
	set	cpu_core, %g2
	sllx	%g1, CPU_CORE_SHIFT, %g1
	add	%g1, %g2, %g1
	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
	bz	0f
	or	%g2, CPU_DTRACE_BADADDR, %g2
	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
	done
0:
	TSTAT_CHECK_TL1(1f, %g1, %g2)
1:
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	/*
	 * g2=tagacc g3.l=type g3.h=0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4
	SET_SIZE(sfmmu_kpm_exception)

#if (IMAP_SEG != 0)
#error - ism_map->ism_seg offset is not zero
#endif
1816
1817/*
1818 * Copies ism mapping for this ctx in param "ism" if this is a ISM
1819 * tlb miss and branches to label "ismhit". If this is not an ISM
1820 * process or an ISM tlb miss it falls thru.
1821 *
1822 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
1823 * this process.
1824 * If so, it will branch to label "ismhit".  If not, it will fall through.
1825 *
1826 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
1827 * so that any other threads of this process will not try and walk the ism
1828 * maps while they are being changed.
1829 *
1830 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
1831 *       will make sure of that. This means we can terminate our search on
1832 *       the first zero mapping we find.
1833 *
1834 * Parameters:
1835 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
1836 * tsbmiss	= address of tsb miss area (in)
1837 * ismseg	= contents of ism_seg for this ism map (out)
1838 * ismhat	= physical address of imap_ismhat for this ism map (out)
1839 * tmp1		= scratch reg (CLOBBERED)
1840 * tmp2		= scratch reg (CLOBBERED)
1841 * tmp3		= scratch reg (CLOBBERED)
1842 * label:	temporary labels
1843 * ismhit:	label to jump to on an ism dtlb miss
1845 */
1846#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
1847	label, ismhit)							\
1848	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
1849	brlz,pt  tmp1, label##3		/* exit if -1 */	;\
1850	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
1851label##1:								;\
1852	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
1853	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
1854label##2:								;\
1855	brz,pt  ismseg, label##3		/* no mapping */	;\
1856	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
1857	lduba	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
1858	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
1859	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
1860	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
1861	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
1862	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
1863	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
1864	cmp	tmp2, tmp1		/* is (va - vbase) < size? */	;\
1865	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
1866	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
1867									;\
1868	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
1869	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
1870	cmp	ismhat, tmp1						;\
1871	bl,pt	%xcc, label##2		/* keep looking  */	;\
1872	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
1873									;\
1874	add	tmp3, IBLK_NEXTPA, tmp1					;\
1875	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
1876	brgez,pt tmp1, label##1		/* continue if not -1*/	;\
1877	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
1878label##3:
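
/*
 * For reference, the ISM_CHECK walk above is roughly the following C
 * sketch.  The structure and field names are illustrative simplifications
 * of the real ism_blk/ism_map definitions in hat_sfmmu.h, and the
 * ASI_MEM (physical address) dereferences are elided, so treat this as
 * an assumption, not the authoritative layout:
 *
 *	for (blk = tsbmiss->ismblkpa; blk != -1; blk = blk->iblk_nextpa) {
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			ism_map_t *map = &blk->iblk_maps[i];
 *			if (map->imap_seg == 0)
 *				goto out;	// no holes: search is done
 *			vbase = map->imap_seg >> map->imap_vb_shift;
 *			size = map->imap_seg & map->imap_sz_mask;
 *			if ((tagacc >> map->imap_vb_shift) - vbase < size)
 *				goto ismhit;	// ismseg/ismhat are set
 *		}
 *	}
 */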
1879
1880/*
1881 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
1882 * It also returns the virtual pg for vaddr (i.e. vaddr >> hmeshift).
1883 * Parameters:
1884 * tagacc = reg containing virtual address
1885 * hatid = reg containing sfmmu pointer
1886 * tsbarea = pointer to the tsbmiss area for this cpu
1887 * hmeshift = constant/register to shift vaddr to obtain vapg
1888 * hmebp = register where bucket pointer will be stored
1889 * vapg = register for the virtual page; label, tmp1, tmp2 = scratch
1890 */
1891
1892
1893#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
1894	vapg, label, tmp1, tmp2)					\
1895	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
1896	brnz,a,pt tmp1, label##1					;\
1897	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
1898	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
1899	ba,pt	%xcc, label##2					;\
1900	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
1901label##1:								;\
1902	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
1903label##2:								;\
1904	srlx	tagacc, hmeshift, vapg					;\
1905	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
1906	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
1907	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
1908	add	hmebp, tmp1, hmebp
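
/*
 * In C terms the bucket selection above is approximately the sketch
 * below.  The names are illustrative; note that the "hash size" words
 * loaded from the tsbmiss area are used as masks (nbuckets - 1) by the
 * and instruction, which this sketch makes explicit as an assumption.
 *
 *	uint64_t mask = (tagacc << TAGACC_CTX_LSHIFT) ?
 *	    tsbmiss->uhashsz : tsbmiss->khashsz;	// user vs. kernel
 *	uintptr_t base = (tagacc << TAGACC_CTX_LSHIFT) ?
 *	    tsbmiss->uhashstart : tsbmiss->khashstart;
 *	uint64_t vapg = tagacc >> hmeshift;
 *	hmebp = base + ((vapg ^ (uintptr_t)hatid) & mask) * HMEBUCK_SIZE;
 */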
1909
1910/*
1911 * hashtag includes bspage + hashno (64 bits).
1912 */
1913
1914#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
1915	sllx	vapg, hmeshift, vapg					;\
1916	mov	hashno, hblktag						;\
1917	sllx	hblktag, HTAG_REHASH_SHIFT, hblktag			;\
1918	or	vapg, hblktag, hblktag
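
/*
 * Equivalently, in C (illustrative):
 *
 *	hblktag = (vapg << hmeshift) |
 *	    ((uint64_t)hashno << HTAG_REHASH_SHIFT);
 *
 * i.e. the base page address of the span with the rehash number folded
 * into the tag's upper bits.
 */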
1919
1920/*
1921 * Function to traverse hmeblk hash link list and find corresponding match.
1922 * The search is done using physical pointers. It returns the physical address
1923 * pointer to the hmeblk that matches with the tag provided.
1924 * Parameters:
1925 * hmebp	= register that points to hme hash bucket, also used as
1926 *		  tmp reg (clobbered)
1927 * hmeblktag	= register with hmeblk tag match
1928 * hatid	= register with hatid
1929 * hmeblkpa	= register where physical ptr will be stored
1930 * tmp1		= tmp reg
1931 * label: temporary label
1932 */
1933
1934#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, tsbarea, 	\
1935	tmp1, label)							\
1936	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
1937	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
1938	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
1939label##1:								;\
1940	cmp	hmeblkpa, HMEBLK_ENDPA					;\
1941	be,pn   %xcc, label##2					;\
1942	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
1943	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
1944	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
1945	add	hmebp, CLONGSIZE, hmebp					;\
1946	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
1947	xor	tmp1, hmeblktag, tmp1					;\
1948	xor	hmebp, hatid, hmebp					;\
1949	or	hmebp, tmp1, hmebp					;\
1950	brz,pn	hmebp, label##2	/* branch on hit */		;\
1951	  add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
1952	ba,pt	%xcc, label##1					;\
1953	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
1954label##2:
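
/*
 * A C rendering of the chain walk above (illustrative; the real code
 * follows physical addresses through ASI_MEM and the tag field names
 * here are assumptions):
 *
 *	for (pa = hmebp->hmeh_nextpa; pa != HMEBLK_ENDPA;
 *	    pa = hblk(pa)->hblk_nextpa) {
 *		if ((hblk(pa)->hblk_tag[0] ^ hmeblktag) == 0 &&
 *		    (hblk(pa)->hblk_tag[1] ^ (uint64_t)hatid) == 0)
 *			break;		// hit; hmeblkpa = pa
 *	}
 *
 * The xor/xor/or sequence lets one branch test both 64-bit tag words.
 */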
1955
1956/*
1957 * Function to traverse hmeblk hash link list and find corresponding match.
1958 * The search is done using physical pointers. It returns the physical address
1959 * pointer to the hmeblk that matches with the tag
1960 * provided.
1961 * Parameters:
1962 * hmeblktag	= register with hmeblk tag match (rid field is 0)
1963 * hatid	= register with hatid (pointer to SRD)
1964 * hmeblkpa	= register where physical ptr will be stored
1965 * tmp1		= tmp reg
1966 * tmp2		= tmp reg
1967 * label: temporary label
1968 */
1969
1970#define	HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, tsbarea,	\
1971	tmp1, tmp2, label)			 			\
1972label##1:								;\
1973	cmp	hmeblkpa, HMEBLK_ENDPA					;\
1974	be,pn   %xcc, label##4					;\
1975	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			;\
1976	add	hmeblkpa, HMEBLK_TAG, tmp2				;\
1977	ldxa	[tmp2]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
1978	add	tmp2, CLONGSIZE, tmp2					;\
1979	ldxa	[tmp2]ASI_MEM, tmp2 	/* read 2nd part of tag */	;\
1980	xor	tmp1, hmeblktag, tmp1					;\
1981	xor	tmp2, hatid, tmp2					;\
1982	brz,pn	tmp2, label##3	/* branch on hit */		;\
1983	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
1984label##2:								;\
1985	ba,pt	%xcc, label##1					;\
1986	  ldxa	[tmp2]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */		;\
1987label##3:								;\
1988	cmp	tmp1, SFMMU_MAX_HME_REGIONS				;\
1989	bgeu,pt	%xcc, label##2					;\
1990	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
1991	and	tmp1, BT_ULMASK, tmp2					;\
1992	srlx	tmp1, BT_ULSHIFT, tmp1					;\
1993	sllx	tmp1, CLONGSHIFT, tmp1					;\
1994	add	tsbarea, tmp1, tmp1					;\
1995	ldx	[tmp1 + TSBMISS_SHMERMAP], tmp1				;\
1996	srlx	tmp1, tmp2, tmp1					;\
1997	btst	0x1, tmp1						;\
1998	bz,pn	%xcc, label##2					;\
1999	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2000label##4:
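
/*
 * The shared-hmeblk variant differs at the match test: the search tag
 * is built with rid == 0, so xor-ing the first tag word of a matching
 * block leaves just that block's region id.  Roughly, in C
 * (illustrative names):
 *
 *	uint64_t rid = hblk_tag0 ^ hmeblktag;
 *	if (hblk_tag1 == (uint64_t)srdp && rid < SFMMU_MAX_HME_REGIONS &&
 *	    (tsbmiss->shmermap[rid >> BT_ULSHIFT] >>
 *	    (rid & BT_ULMASK)) & 1)
 *		hit();		// region is attached to this process
 *	else
 *		continue;	// keep walking the chain
 */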
2001
2002#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2003#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2004#endif
2005
2006/*
2007 * HMEBLK_TO_HMENT is a macro that given an hmeblk and a vaddr returns
2008 * the offset for the corresponding hment.
2009 * Parameters:
2010 * In:
2011 *	vaddr = register with virtual address
2012 *	hmeblkpa = physical pointer to hme_blk
2013 * Out:
2014 *	hmentoff = register where hment offset will be stored
2015 *	hmemisc = hblk_misc
2016 * Scratch:
2017 *	tmp1
2018 */
2019#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
2020	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2021	lda	[hmentoff]ASI_MEM, hmemisc 				;\
2022	andcc	hmemisc, HBLK_SZMASK, %g0				;\
2023	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2024	  or	%g0, HMEBLK_HME1, hmentoff				;\
2025	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2026	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2027	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2028	add	tmp1, HMEBLK_HME1, hmentoff				;\
2029label1:
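
/*
 * In C (illustrative): an 8K hmeblk spans NHMENTS consecutive 8K
 * pages, one hment each; larger page sizes keep a single hment at
 * offset HMEBLK_HME1.
 *
 *	if (hblk_misc & HBLK_SZMASK)		// size != TTE8K
 *		hmentoff = HMEBLK_HME1;
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    (((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) <<
 *		    SFHME_SHIFT);
 */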
2030
2031/*
2032 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2033 *
2034 * tagacc	= (pseudo-)tag access register (in)
2035 * hatid	= sfmmu pointer for TSB miss (in)
2036 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2037 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2038 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2039 * hmemisc	= hblk_misc if TTE is found (out), otherwise clobbered
2040 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2041 *		  for this page size.
2042 * hashno	= constant/register hash number
2043 * tmp		= temp value - clobbered
2044 * label	= temporary label for branching within macro.
2045 * foundlabel	= label to jump to when tte is found.
2046 * suspendlabel = label to jump to when tte is suspended.
2047 * exitlabel	= label to jump to when tte is not found.
2048 *
2049 */
2050#define GET_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, hmeshift, \
2051		 hashno, tmp, label, foundlabel, suspendlabel, exitlabel) \
2052									;\
2053	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2054	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2055	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2056		hmeblkpa, label##5, hmemisc, tmp)			;\
2057									;\
2058	/*								;\
2059	 * tagacc = tagacc						;\
2060	 * hatid = hatid						;\
2061	 * tsbarea = tsbarea						;\
2062	 * tte   = hmebp (hme bucket pointer)				;\
2063	 * hmeblkpa  = vapg  (virtual page)				;\
2064	 * hmemisc, tmp = scratch					;\
2065	 */								;\
2066	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2067	or	hmemisc, SFMMU_INVALID_SHMERID, hmemisc			;\
2068									;\
2069	/*								;\
2070	 * tagacc = tagacc						;\
2071	 * hatid = hatid						;\
2072	 * tte   = hmebp						;\
2073	 * hmeblkpa  = CLOBBERED					;\
2074	 * hmemisc  = htag_bspage+hashno+invalid_rid			;\
2075	 * tmp  = scratch						;\
2076	 */								;\
2077	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2078	HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, 	 		\
2079		tsbarea, tagacc, label##1)				;\
2080	/*								;\
2081	 * tagacc = CLOBBERED						;\
2082	 * tte = CLOBBERED						;\
2083	 * hmeblkpa = hmeblkpa						;\
2084	 * tmp = scratch						;\
2085	 */								;\
2086	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2087	bne,pn   %xcc, label##4       /* branch if hmeblk found */    ;\
2088	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2089	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2090	  nop								;\
2091label##4:								;\
2092	/*								;\
2093	 * We have found the hmeblk containing the hment.		;\
2094	 * Now we calculate the corresponding tte.			;\
2095	 *								;\
2096	 * tagacc = tagacc						;\
2097	 * hatid = hatid						;\
2098	 * tte   = clobbered						;\
2099	 * hmeblkpa  = hmeblkpa						;\
2100	 * hmemisc  = hblktag						;\
2101	 * tmp = scratch						;\
2102	 */								;\
2103	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2104		label##2)						;\
2105									;\
2106	/*								;\
2107	 * tagacc = tagacc						;\
2108	 * hatid = hmentoff						;\
2109	 * tte   = clobbered						;\
2110	 * hmeblkpa  = hmeblkpa						;\
2111	 * hmemisc  = hblk_misc						;\
2112	 * tmp = scratch						;\
2113	 */								;\
2114									;\
2115	add	hatid, SFHME_TTE, hatid					;\
2116	add	hmeblkpa, hatid, hmeblkpa				;\
2117	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2118	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2119	set	TTE_SUSPEND, hatid					;\
2120	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2121	btst	tte, hatid						;\
2122	bz,pt	%xcc, foundlabel					;\
2123	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2124									;\
2125	/*								;\
2126	 * Mapping is suspended, so goto suspend label.			;\
2127	 */								;\
2128	ba,pt	%xcc, suspendlabel					;\
2129	  nop
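
/*
 * Putting the pieces together, GET_TTE is approximately this C lookup
 * (illustrative; scratch-area spills and the ASI_MEM dereferences are
 * elided):
 *
 *	hmebp = HMEHASH_FUNC(tagacc, hatid, hmeshift);
 *	tag = MAKE_HASHTAG(tagacc >> hmeshift, hashno) |
 *	    SFMMU_INVALID_SHMERID;		// private blocks only
 *	pa = HMEHASH_SEARCH(hmebp, tag, hatid);
 *	if (pa == HMEBLK_ENDPA)
 *		goto exitlabel;			// caller rehashes or faults
 *	tte = *(uint64_t *)(pa + hmentoff(tagacc, pa) + SFHME_TTE);
 *	if (tte & TTE_SUSPEND)
 *		goto suspendlabel;		// relocation in progress
 *	goto foundlabel;
 */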
2130
2131/*
2132 * GET_SHME_TTE is similar to GET_TTE() except it searches
2133 * shared hmeblks via HMEHASH_SEARCH_SHME() macro.
2134 * If valid tte is found, hmemisc = shctx flag, i.e., shme is
2135 * either 0 (not part of scd) or 1 (part of scd).
2136 */
2137#define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, 	\
2138		hmeshift, hashno, tmp, label, foundlabel,		\
2139		suspendlabel, exitlabel)				\
2140									;\
2141	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2142	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2143	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2144		hmeblkpa, label##5, hmemisc, tmp)			;\
2145									;\
2146	/*								;\
2147	 * tagacc = tagacc						;\
2148	 * hatid = hatid						;\
2149	 * tsbarea = tsbarea						;\
2150	 * tte   = hmebp (hme bucket pointer)				;\
2151	 * hmeblkpa  = vapg  (virtual page)				;\
2152	 * hmemisc, tmp = scratch					;\
2153	 */								;\
2154	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2155									;\
2156	/*								;\
2157	 * tagacc = tagacc						;\
2158	 * hatid = hatid						;\
2159	 * tsbarea = tsbarea						;\
2160	 * tte   = hmebp						;\
2161	 * hmemisc  = htag_bspage + hashno + 0 (for rid)		;\
2162	 * hmeblkpa  = CLOBBERED					;\
2163	 * tmp = scratch						;\
2164	 */								;\
2165	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2166									;\
2167	add     tte, HMEBUCK_NEXTPA, hmeblkpa				;\
2168	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2169	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte)			;\
2170									;\
2171label##8:								;\
2172	HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa,			\
2173		tsbarea, tagacc, tte, label##1)			;\
2174	/*								;\
2175	 * tagacc = CLOBBERED						;\
2176	 * tte = CLOBBERED						;\
2177	 * hmeblkpa = hmeblkpa						;\
2178	 * tmp = scratch						;\
2179	 */								;\
2180	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2181	bne,pn   %xcc, label##4       /* branch if hmeblk found */    ;\
2182	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2183	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2184	  nop								;\
2185label##4:								;\
2186	/*								;\
2187	 * We have found the hmeblk containing the hment.		;\
2188	 * Now we calculate the corresponding tte.			;\
2189	 *								;\
2190	 * tagacc = tagacc						;\
2191	 * hatid = hatid						;\
2192	 * tte   = clobbered						;\
2193	 * hmeblkpa  = hmeblkpa						;\
2194	 * hmemisc  = hblktag						;\
2195	 * tsbarea = tsbmiss area					;\
2196	 * tmp = scratch						;\
2197	 */								;\
2198	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2199		label##2)						;\
2200									;\
2201	/*								;\
2202	 * tagacc = tagacc						;\
2203	 * hatid = hmentoff						;\
2204	 * tte = clobbered						;\
2205	 * hmeblkpa  = hmeblkpa						;\
2206	 * hmemisc  = hblk_misc						;\
2207	 * tsbarea = tsbmiss area					;\
2208	 * tmp = scratch						;\
2209	 */								;\
2210									;\
2211	add	hatid, SFHME_TTE, hatid					;\
2212	add	hmeblkpa, hatid, hmeblkpa				;\
2213	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2214	brlz,pt tte, label##6						;\
2215	  nop								;\
2216	btst	HBLK_SZMASK, hmemisc					;\
2217	bnz,a,pt %icc, label##7					;\
2218	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2219									;\
2220	/*								;\
2221	 * We found an invalid 8K tte in shme.				;\
2222	 * It may not belong to shme's region since			;\
2223	 * region size/alignment granularity is 8K but different	;\
2224	 * regions don't share hmeblks. Continue the search.		;\
2225	 */								;\
2226	sub	hmeblkpa, hatid, hmeblkpa				;\
2227	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2228	srlx	tagacc, hmeshift, tte					;\
2229	add	hmeblkpa, HMEBLK_NEXTPA, hmeblkpa			;\
2230	ldxa	[hmeblkpa]ASI_MEM, hmeblkpa				;\
2231	MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc)		;\
2232	ba,a,pt	%xcc, label##8					;\
2233label##6:								;\
2234	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)		;\
2235	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2236label##7:								;\
2237	set	TTE_SUSPEND, hatid					;\
2238	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2239	btst	tte, hatid						;\
2240	bz,pt	%xcc, foundlabel					;\
2241	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2242									;\
2243	/*								;\
2244	 * Mapping is suspended, so goto suspend label.			;\
2245	 */								;\
2246	ba,pt	%xcc, suspendlabel					;\
2247	  nop
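
/*
 * The extra loop handles one subtlety of shared hmeblks: regions are
 * only 8K aligned, so an invalid 8K tte found here may belong to a
 * different region that happens to share the hash chain.  Roughly
 * (illustrative):
 *
 *	while ((pa = search_shme(pa)) != HMEBLK_ENDPA) {
 *		tte = read_tte(pa, tagacc);
 *		if ((int64_t)tte < 0 || !is_8k_hmeblk(pa))
 *			break;		// valid, or invalid large tte
 *		pa = next_pa(pa);	// invalid 8K: resume the search
 *	}
 *
 * On a valid hit GET_SCDSHMERMAP reduces hmemisc to the shctx flag
 * (0 = not part of an scd, 1 = part of an scd) for the caller.
 */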
2248
2249	/*
2250	 * KERNEL PROTECTION HANDLER
2251	 *
2252	 * g1 = tsb8k pointer register (clobbered)
2253	 * g2 = tag access register (ro)
2254	 * g3 - g7 = scratch registers
2255	 *
2256	 * Note: This function is patched at runtime for performance reasons.
2257	 * 	 Any changes here require corresponding changes to
2258	 *	 sfmmu_patch_ktsb.
2258	 */
2259	ENTRY_NP(sfmmu_kprot_trap)
2260	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2261sfmmu_kprot_patch_ktsb_base:
2262	RUNTIME_PATCH_SETX(%g1, %g6)
2263	/* %g1 = contents of ktsb_base or ktsb_pbase */
2264sfmmu_kprot_patch_ktsb_szcode:
2265	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2266
2267	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2268	! %g1 = First TSB entry pointer, as TSB miss handler expects
2269
2270	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2271sfmmu_kprot_patch_ktsb4m_base:
2272	RUNTIME_PATCH_SETX(%g3, %g6)
2273	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2274sfmmu_kprot_patch_ktsb4m_szcode:
2275	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2276
2277	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2278	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2279
2280        CPU_TSBMISS_AREA(%g6, %g7)
2281        HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2282	ba,pt	%xcc, sfmmu_tsb_miss_tt
2283	  nop
2284
2285	/*
2286	 * USER PROTECTION HANDLER
2287	 *
2288	 * g1 = tsb8k pointer register (ro)
2289	 * g2 = tag access register (ro)
2290	 * g3 = faulting context (clobbered, currently not used)
2291	 * g4 - g7 = scratch registers
2292	 */
2293	ALTENTRY(sfmmu_uprot_trap)
2294#ifdef sun4v
2295	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2296	/* %g1 = first TSB entry ptr now, %g2 preserved */
2297
2298	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2299	brlz,pt %g3, 9f				/* check for 2nd TSB */
2300	  nop
2301
2302	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2303	/* %g3 = second TSB entry ptr now, %g2 preserved */
2304
2305#else /* sun4v */
2306#ifdef UTSB_PHYS
2307	/* g1 = first TSB entry ptr */
2308	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2309	brlz,pt %g3, 9f			/* check for 2nd TSB */
2310	  nop
2311
2312	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2313	/* %g3 = second TSB entry ptr now, %g2 preserved */
2314#else /* UTSB_PHYS */
2315	brgez,pt %g1, 9f		/* check for 2nd TSB */
2316	  mov	-1, %g3			/* set second tsbe ptr to -1 */
2317
2318	mov	%g2, %g7
2319	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2320	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2321	mov	%g1, %g7
2322	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2323#endif /* UTSB_PHYS */
2324#endif /* sun4v */
23259:
2326	CPU_TSBMISS_AREA(%g6, %g7)
2327	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2328	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2329	  nop
2330
2331	/*
2332	 * Kernel 8K page iTLB miss.  We also get here if we took a
2333	 * fast instruction access mmu miss trap while running in
2334	 * invalid context.
2335	 *
2336	 * %g1 = 8K TSB pointer register (not used, clobbered)
2337	 * %g2 = tag access register (used)
2338	 * %g3 = faulting context id (used)
2339	 * %g7 = TSB tag to match (used)
2340	 */
2341	.align	64
2342	ALTENTRY(sfmmu_kitlb_miss)
2343	brnz,pn %g3, tsb_tl0_noctxt
2344	  nop
2345
2346	/* kernel miss */
2347	/* get kernel tsb pointer */
2348	/* we patch the next set of instructions at run time */
2349	/* NOTE: any changes here require sfmmu_patch_ktsb fixed */
2350iktsbbase:
2351	RUNTIME_PATCH_SETX(%g4, %g5)
2352	/* %g4 = contents of ktsb_base or ktsb_pbase */
2353
2354iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2355	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2356	or	%g4, %g1, %g1			! form tsb ptr
2357	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2358	cmp	%g4, %g7
2359	bne,pn	%xcc, iktsb4mbase		! check 4m ktsb
2360	  srlx    %g2, MMU_PAGESHIFT4M, %g3	! use 4m virt-page as TSB index
2361
2362	andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2363	bz,pn	%icc, exec_fault
2364	  nop
2365	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2366	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2367	retry
2368
2369iktsb4mbase:
2370        RUNTIME_PATCH_SETX(%g4, %g6)
2371        /* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
2372iktsb4m:
2373	sllx    %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2374        srlx    %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2375	add	%g4, %g3, %g3			! %g3 = 4m tsbe ptr
2376	ldda	[%g3]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2377	cmp	%g4, %g7
2378	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2379	  andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2380	bz,pn	%icc, exec_fault
2381	  nop
2382	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2383	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2384	retry
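
	/*
	 * The patched sllx/srlx pairs above compute the TSB offset with
	 * the size code baked in at boot.  In C terms (illustrative;
	 * the dktsb/dktsb4m probes below use the same pattern):
	 *
	 *	nentries = TSB_ENTRIES(0) << tsb_szcode;  // hot patched
	 *	off = ((va >> pageshift) & (nentries - 1)) <<
	 *	    TSB_ENTRY_SHIFT;
	 *	tsbe = tsb_base + off;	// then compare tag, check exec
	 */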
2385
2386	/*
2387	 * Kernel dTLB miss.  We also get here if we took a fast data
2388	 * access mmu miss trap while running in invalid context.
2389	 *
2390	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2391	 *	We select the TSB miss handler to branch to depending on
2392	 *	the virtual address of the access.  In the future it may
2393	 *	be desirable to separate kpm TTEs into their own TSB,
2394	 *	in which case all that needs to be done is to set
2395	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2396	 *	early in the miss if we detect a kpm VA to a new handler.
2397	 *
2398	 * %g1 = 8K TSB pointer register (not used, clobbered)
2399	 * %g2 = tag access register (used)
2400	 * %g3 = faulting context id (used)
2401	 */
2402	.align	64
2403	ALTENTRY(sfmmu_kdtlb_miss)
2404	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2405	  nop
2406
2407	/* Gather some stats for kpm misses in the TLB. */
2408	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2409	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2410
2411	/*
2412	 * Get first TSB offset and look for 8K/64K/512K mapping
2413	 * using the 8K virtual page as the index.
2414	 *
2415	 * We patch the next set of instructions at run time;
2416	 * any changes here require sfmmu_patch_ktsb changes too.
2417	 */
2418dktsbbase:
2419	RUNTIME_PATCH_SETX(%g7, %g6)
2420	/* %g7 = contents of ktsb_base or ktsb_pbase */
2421
2422dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2423	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2424
2425	/*
2426	 * At this point %g1 is our index into the TSB.
2427	 * We just masked off enough bits of the VA depending
2428	 * on our TSB size code.
2429	 */
2430	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2431	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2432	cmp	%g6, %g4			! compare tag
2433	bne,pn	%xcc, dktsb4m_kpmcheck_small
2434	  add	%g7, %g1, %g1			/* form tsb ptr */
2435	TT_TRACE(trace_tsbhit)
2436	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2437	/* trapstat expects tte in %g5 */
2438	retry
2439
2440	/*
2441	 * If kpm is using large pages, the following instruction needs
2442	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2443	 * so that we will probe the 4M TSB regardless of the VA.  In
2444	 * the case kpm is using small pages, we know no large kernel
2445	 * mappings are located above 0x80000000.00000000 so we skip the
2446	 * probe as an optimization.
2447	 */
2448dktsb4m_kpmcheck_small:
2449	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2450	  /* delay slot safe, below */
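
	/*
	 * In C terms the check above is simply (illustrative):
	 *
	 *	if ((int64_t)tagacc < 0)	// VA has bit 63 set: kpm
	 *		return (sfmmu_kpm_dtsb_miss_small());
	 *
	 * and the brlz is the instruction sfmmu_kpm_patch_tsbm nops out
	 * when kpm uses large pages.
	 */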
2451
2452	/*
2453	 * Get second TSB offset and look for 4M mapping
2454	 * using 4M virtual page as the TSB index.
2455	 *
2456	 * Here:
2457	 * %g1 = 8K TSB pointer.  Don't squash it.
2458	 * %g2 = tag access register (we still need it)
2459	 */
2460	srlx	%g2, MMU_PAGESHIFT4M, %g3
2461
2462	/*
2463	 * We patch the next set of instructions at run time;
2464	 * any changes here require sfmmu_patch_ktsb changes too.
2465	 */
2466dktsb4mbase:
2467	RUNTIME_PATCH_SETX(%g7, %g6)
2468	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2469dktsb4m:
2470	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2471	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2472
2473	/*
2474	 * At this point %g3 is our index into the TSB.
2475	 * We just masked off enough bits of the VA depending
2476	 * on our TSB size code.
2477	 */
2478	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2479	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2480	cmp	%g6, %g4			! compare tag
2481
2482dktsb4m_tsbmiss:
2483	bne,pn	%xcc, dktsb4m_kpmcheck
2484	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2485	TT_TRACE(trace_tsbhit)
2486	/* we don't check TTE size here since we assume 4M TSB is separate */
2487	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2488	/* trapstat expects tte in %g5 */
2489	retry
2490
2491	/*
2492	 * So, we failed to find a valid TTE to match the faulting
2493	 * address in either TSB.  There are a few cases that could land
2494	 * us here:
2495	 *
2496	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2497	 *    to sfmmu_tsb_miss_tt to handle the miss.
2498	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2499	 *    4M TSB.  Let segkpm handle it.
2500	 *
2501	 * Note that we shouldn't land here in the case of a kpm VA when
2502	 * kpm_smallpages is active -- we handled that case earlier at
2503	 * dktsb4m_kpmcheck_small.
2504	 *
2505	 * At this point:
2506	 *  g1 = 8K-indexed primary TSB pointer
2507	 *  g2 = tag access register
2508	 *  g3 = 4M-indexed secondary TSB pointer
2509	 */
2510dktsb4m_kpmcheck:
2511	cmp	%g2, %g0
2512	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2513	  nop
2514	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2515	  nop
2516
2517#ifdef sun4v
2518	/*
2519	 * User instruction miss w/ single TSB.
2520	 * The first probe covers 8K, 64K, and 512K page sizes,
2521	 * because 64K and 512K mappings are replicated off 8K
2522	 * pointer.
2523	 *
2524	 * g1 = tsb8k pointer register
2525	 * g2 = tag access register
2526	 * g3 - g6 = scratch registers
2527	 * g7 = TSB tag to match
2528	 */
2529	.align	64
2530	ALTENTRY(sfmmu_uitlb_fastpath)
2531
2532	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2533	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2534	ba,pn	%xcc, sfmmu_tsb_miss_tt
2535	  mov	-1, %g3
2536
2537	/*
2538	 * User data miss w/ single TSB.
2539	 * The first probe covers 8K, 64K, and 512K page sizes,
2540	 * because 64K and 512K mappings are replicated off 8K
2541	 * pointer.
2542	 *
2543	 * g1 = tsb8k pointer register
2544	 * g2 = tag access register
2545	 * g3 - g6 = scratch registers
2546	 * g7 = TSB tag to match
2547	 */
2548	.align 64
2549	ALTENTRY(sfmmu_udtlb_fastpath)
2550
2551	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2552	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2553	ba,pn	%xcc, sfmmu_tsb_miss_tt
2554	  mov	-1, %g3
2555
2556	/*
2557	 * User instruction miss w/ multiple TSBs (sun4v).
2558	 * The first probe covers 8K, 64K, and 512K page sizes,
2559	 * because 64K and 512K mappings are replicated off 8K
2560	 * pointer.  Second probe covers 4M page size only.
2561	 *
2562	 * Just like sfmmu_udtlb_slowpath, except:
2563	 *   o Uses ASI_ITLB_IN
2564	 *   o checks for execute permission
2565	 *   o No ISM prediction.
2566	 *
2567	 * g1 = tsb8k pointer register
2568	 * g2 = tag access register
2569	 * g3 - g6 = scratch registers
2570	 * g7 = TSB tag to match
2571	 */
2572	.align	64
2573	ALTENTRY(sfmmu_uitlb_slowpath)
2574
2575	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2576	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2577	/* g4 - g5 = clobbered here */
2578
2579	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2580	/* g1 = first TSB pointer, g3 = second TSB pointer */
2581	srlx	%g2, TAG_VALO_SHIFT, %g7
2582	PROBE_2ND_ITSB(%g3, %g7)
2583	/* NOT REACHED */
2584
2585#else /* sun4v */
2586
2587	/*
2588	 * User instruction miss w/ multiple TSBs (sun4u).
2589	 * The first probe covers 8K, 64K, and 512K page sizes,
2590	 * because 64K and 512K mappings are replicated off 8K
2591	 * pointer.  Probe of 1st TSB has already been done prior to entry
2592	 * into this routine. For the UTSB_PHYS case we probe up to 3
2593	 * valid other TSBs in the following order:
2594	 * 1) shared TSB for 4M-256M pages
2595	 * 2) private TSB for 4M-256M pages
2596	 * 3) shared TSB for 8K-512K pages
2597	 *
2598	 * For the non UTSB_PHYS case we probe the 2nd TSB here that backs
2599	 * 4M-256M pages.
2600	 *
2601	 * Just like sfmmu_udtlb_slowpath, except:
2602	 *   o Uses ASI_ITLB_IN
2603	 *   o checks for execute permission
2604	 *   o No ISM prediction.
2605	 *
2606	 * g1 = tsb8k pointer register
2607	 * g2 = tag access register
2608	 * g4 - g6 = scratch registers
2609	 * g7 = TSB tag to match
2610	 */
2611	.align	64
2612	ALTENTRY(sfmmu_uitlb_slowpath)
2613
2614#ifdef UTSB_PHYS
2615
2616        GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2617        brlz,pt %g6, 1f
2618          nop
2619        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2620        PROBE_4TH_ITSB(%g6, %g7, uitlb_4m_scd_probefail)
26211:
2622        GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2623        brlz,pt %g3, 2f
2624          nop
2625        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2626        PROBE_2ND_ITSB(%g3, %g7, uitlb_4m_probefail)
26272:
2628        GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2629        brlz,pt %g6, sfmmu_tsb_miss_tt
2630          nop
2631        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2632        PROBE_3RD_ITSB(%g6, %g7, uitlb_8K_scd_probefail)
2633        ba,pn   %xcc, sfmmu_tsb_miss_tt
2634          nop
2635
2636#else /* UTSB_PHYS */
2637	mov	%g1, %g3	/* save tsb8k reg in %g3 */
2638	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
2639	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2640	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
2641	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
2642	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
2643       /* g1 = first TSB pointer, g3 = second TSB pointer */
2644        srlx    %g2, TAG_VALO_SHIFT, %g7
2645        PROBE_2ND_ITSB(%g3, %g7, isynth)
2646	ba,pn	%xcc, sfmmu_tsb_miss_tt
2647	  nop
2648
2649#endif /* UTSB_PHYS */
2650#endif /* sun4v */
2651
2652#if defined(sun4u) && defined(UTSB_PHYS)
2653
2654        /*
2655	 * We come here for the ism predict DTLB_MISS case or if the
2656	 * probe in the first TSB failed.
2657         */
2658
2659        .align 64
2660        ALTENTRY(sfmmu_udtlb_slowpath_noismpred)
2661
2662	/*
2663         * g1 = tsb8k pointer register
2664         * g2 = tag access register
2665         * g4 - g6 = scratch registers
2666         * g7 = TSB tag to match
2667	 */
2668
2669	/*
2670	 * ISM non-predict probe order
2671         * probe 1ST_TSB (8K index)
2672         * probe 2ND_TSB (4M index)
2673         * probe 4TH_TSB (4M index)
2674         * probe 3RD_TSB (8K index)
2675	 *
2676	 * We already probed first TSB in DTLB_MISS handler.
2677	 */
2678
2679        /*
2680         * Private 2ND TSB 4M-256M pages
2681         */
2682	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2683	brlz,pt %g3, 1f
2684	  nop
2685        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2686        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2687
2688	/*
2689	 * Shared Context 4TH TSB 4M-256 pages
2690	 * Shared Context 4TH TSB 4M-256M pages
26911:
2692	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2693	brlz,pt %g6, 2f
2694	  nop
2695        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2696        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail)
2697
2698        /*
2699         * Shared Context 3RD TSB 8K-512K pages
2700         */
27012:
2702	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2703	brlz,pt %g6, sfmmu_tsb_miss_tt
2704	  nop
2705        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2706        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail)
2707	ba,pn	%xcc, sfmmu_tsb_miss_tt
2708	  nop
2709
2710	.align 64
2711        ALTENTRY(sfmmu_udtlb_slowpath_ismpred)
2712
2713	/*
2714         * g1 = tsb8k pointer register
2715         * g2 = tag access register
2716         * g4 - g6 = scratch registers
2717         * g7 = TSB tag to match
2718	 */
2719
2720	/*
2721	 * ISM predict probe order
2722	 * probe 4TH_TSB (4M index)
2723	 * probe 2ND_TSB (4M index)
2724	 * probe 1ST_TSB (8K index)
2725	 * probe 3RD_TSB (8K index)
2726	 */
2727	/*
2728	 * Shared Context 4TH TSB 4M-256 pages
2729	 * Shared Context 4TH TSB 4M-256M pages
2730	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2731	brlz,pt %g6, 4f
2732	  nop
2733        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2734        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail2)
2735
2736        /*
2737         * Private 2ND TSB 4M-256 pages
2738         * Private 2ND TSB 4M-256M pages
27394:
2740	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2741	brlz,pt %g3, 5f
2742	  nop
2743        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2744        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail2)
2745
27465:
2747        PROBE_1ST_DTSB(%g1, %g7, udtlb_8k_first_probefail2)
2748
2749        /*
2750         * Shared Context 3RD TSB 8K-512K pages
2751         */
2752	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2753	brlz,pt %g6, 6f
2754	  nop
2755        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2756        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail2)
27576:
2758	ba,pn	%xcc, sfmmu_tsb_miss_tt /* ISM Predict and ISM non-predict path */
2759	  nop
2760
2761#else /* sun4u && UTSB_PHYS */
2762
2763        .align 64
2764        ALTENTRY(sfmmu_udtlb_slowpath)
2765
2766	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
2767	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
2768	  mov	%g1, %g3
2769
2770udtlb_miss_probefirst:
2771	/*
2772	 * g1 = 8K TSB pointer register
2773	 * g2 = tag access register
2774	 * g3 = (potentially) second TSB entry ptr
2775	 * g6 = ism pred.
2776	 * g7 = vpg_4m
2777	 */
2778#ifdef sun4v
2779	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2780	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2781
2782	/*
2783	 * Here:
2784	 *   g1 = first TSB pointer
2785	 *   g2 = tag access reg
2786	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2787	 */
2788	brgz,pn	%g6, sfmmu_tsb_miss_tt
2789	  nop
2790#else /* sun4v */
2791	mov	%g1, %g4
2792	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
2793	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2794
2795	/*
2796	 * Here:
2797	 *   g1 = first TSB pointer
2798	 *   g2 = tag access reg
2799	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2800	 */
2801	brgz,pn	%g6, sfmmu_tsb_miss_tt
2802	  nop
2803	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
2804	/* fall through in 8K->4M probe order */
2805#endif /* sun4v */
2806
2807udtlb_miss_probesecond:
2808	/*
2809	 * Look in the second TSB for the TTE
2810	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
2811	 * g2 = tag access reg
2812	 * g3 = 8K TSB pointer register
2813	 * g6 = ism pred.
2814	 * g7 = vpg_4m
2815	 */
2816#ifdef sun4v
2817	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
2818	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2819	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
2820#else /* sun4v */
2821	mov	%g3, %g7
2822	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
2823	/* %g2 clobbered, %g3 = second tsbe ptr */
2824	mov	MMU_TAG_ACCESS, %g2
2825	ldxa	[%g2]ASI_DMMU, %g2
2826#endif /* sun4v */
2827
2828	srlx	%g2, TAG_VALO_SHIFT, %g7
2829	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2830	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
2831	brgz,pn	%g6, udtlb_miss_probefirst
2832	  nop
2833
2834	/* fall through to sfmmu_tsb_miss_tt */
2835#endif /* sun4u && UTSB_PHYS */
2836
2837
2838	ALTENTRY(sfmmu_tsb_miss_tt)
2839	TT_TRACE(trace_tsbmiss)
2840	/*
2841	 * We get here if there is a TSB miss OR a write protect trap.
2842	 *
2843	 * g1 = First TSB entry pointer
2844	 * g2 = tag access register
2845	 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
2846	 * g4 - g7 = scratch registers
2847	 */
2848
2849	ALTENTRY(sfmmu_tsb_miss)
2850
2851	/*
2852	 * If trapstat is running, we need to shift the %tpc and %tnpc to
2853	 * point to trapstat's TSB miss return code (note that trapstat
2854	 * itself will patch the correct offset to add).
2855	 */
2856	rdpr	%tl, %g7
2857	cmp	%g7, 1
2858	ble,pt	%xcc, 0f
2859	  sethi	%hi(KERNELBASE), %g6
2860	rdpr	%tpc, %g7
2861	or	%g6, %lo(KERNELBASE), %g6
2862	cmp	%g7, %g6
2863	bgeu,pt	%xcc, 0f
2864	/* delay slot safe */
2865
2866	ALTENTRY(tsbmiss_trapstat_patch_point)
2867	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
2868	wrpr	%g7, %tpc
2869	add	%g7, 4, %g7
2870	wrpr	%g7, %tnpc
28710:
2872	CPU_TSBMISS_AREA(%g6, %g7)
2873	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save 1ST tsb pointer */
2874	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save 2ND tsb pointer */
2875
2876	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
2877	brz,a,pn %g3, 1f			/* skip ahead if kernel */
2878	  ldn	[%g6 + TSBMISS_KHATID], %g7
2879	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
2880	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
2881
2882	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
2883
2884	cmp	%g3, INVALID_CONTEXT
2885	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
2886	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
2887
2888#if defined(sun4v) || defined(UTSB_PHYS)
2889        ldub    [%g6 + TSBMISS_URTTEFLAGS], %g7	/* clear ctx1 flag set from */
2890        andn    %g7, HAT_CHKCTX1_FLAG, %g7	/* the previous tsb miss    */
2891        stub    %g7, [%g6 + TSBMISS_URTTEFLAGS]
2892#endif /* sun4v || UTSB_PHYS */
2893
2894	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
2895	/*
2896	 * The miss wasn't in an ISM segment.
2897	 *
2898	 * %g1, %g3, %g4, %g5, %g7 all clobbered
2899	 * %g2 = (pseudo) tag access
2900	 */
2901
2902	ba,pt	%icc, 2f
2903	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
2904
29051:
2906	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
2907	/*
2908	 * 8K and 64K hash.
2909	 */
29102:
2911
2912	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
2913		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_l8K, tsb_checktte,
2914		sfmmu_suspend_tl, tsb_512K)
2915	/* NOT REACHED */
2916
2917tsb_512K:
2918	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
2919	brz,pn	%g5, 3f
2920	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
2921	and	%g4, HAT_512K_FLAG, %g5
2922
2923	/*
2924	 * Note that there is a small window here where we may have
2925	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
2926	 * flag yet, so we will skip searching the 512k hash list.
2927	 * In this case we will end up in pagefault which will find
2928	 * the mapping and return.  So, in this instance we will end up
2929	 * spending a bit more time resolving this TSB miss, but it can
2930	 * only happen once per process and even then, the chances of that
2931	 * are very small, so it's not worth the extra overhead it would
2932	 * take to close this window.
2933	 */
2934	brz,pn	%g5, tsb_4M
2935	  nop
29363:
2937	/*
2938	 * 512K hash
2939	 */
2940
2941	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
2942		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_l512K, tsb_checktte,
2943		sfmmu_suspend_tl, tsb_4M)
2944	/* NOT REACHED */
2945
2946tsb_4M:
2947	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
2948	brz,pn	%g5, 4f
2949	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
2950	and	%g4, HAT_4M_FLAG, %g5
2951	brz,pn	%g5, tsb_32M
2952	  nop
29534:
2954	/*
2955	 * 4M hash
2956	 */
2957
2958	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
2959		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_l4M, tsb_checktte,
2960		sfmmu_suspend_tl, tsb_32M)
2961	/* NOT REACHED */
2962
2963tsb_32M:
2964	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
2965#ifdef	sun4v
2966        brz,pn	%g5, 6f
2967#else
2968	brz,pn  %g5, tsb_pagefault
2969#endif
2970	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
2971	and	%g4, HAT_32M_FLAG, %g5
2972	brz,pn	%g5, tsb_256M
2973	  nop
29745:
2975	/*
2976	 * 32M hash
2977	 */
2978
2979	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
2980		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_l32M, tsb_checktte,
2981		sfmmu_suspend_tl, tsb_256M)
2982	/* NOT REACHED */
2983
2984#if defined(sun4u) && !defined(UTSB_PHYS)
2985#define tsb_shme        tsb_pagefault
2986#endif
2987tsb_256M:
2988	ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
2989	and	%g4, HAT_256M_FLAG, %g5
2990	brz,pn	%g5, tsb_shme
2991	  nop
29926:
2993	/*
2994	 * 256M hash
2995	 */
2996
2997	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
2998	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_l256M, tsb_checktte,
2999	    sfmmu_suspend_tl, tsb_shme)
3000	/* NOT REACHED */
3001
3002tsb_checktte:
3003	/*
3004	 * g1 = hblk_misc
3005	 * g2 = tagacc
3006	 * g3 = tte
3007	 * g4 = tte pa
3008	 * g6 = tsbmiss area
3009	 * g7 = hatid
3010	 */
3011	brlz,a,pt %g3, tsb_validtte
3012	  rdpr	%tt, %g7
3013
3014#if defined(sun4u) && !defined(UTSB_PHYS)
3015#undef tsb_shme
3016	ba      tsb_pagefault
3017	  nop
3018#else /* sun4u && !UTSB_PHYS */
3019
3020tsb_shme:
3021	/*
3022	 * g2 = tagacc
3023	 * g6 = tsbmiss area
3024	 */
3025	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3026	brz,pn	%g5, tsb_pagefault
3027	  nop
3028	ldx	[%g6 + TSBMISS_SHARED_UHATID], %g7	/* g7 = srdp */
3029	brz,pn	%g7, tsb_pagefault
3030	  nop
3031
3032	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3033		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_shme_l8K, tsb_shme_checktte,
3034		sfmmu_suspend_tl, tsb_shme_512K)
3035	/* NOT REACHED */
3036
3037tsb_shme_512K:
3038	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3039	and	%g4, HAT_512K_FLAG, %g5
3040	brz,pn	%g5, tsb_shme_4M
3041	  nop
3042
3043	/*
3044	 * 512K hash
3045	 */
3046
3047	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3048		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_shme_l512K, tsb_shme_checktte,
3049		sfmmu_suspend_tl, tsb_shme_4M)
3050	/* NOT REACHED */
3051
3052tsb_shme_4M:
3053	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3054	and	%g4, HAT_4M_FLAG, %g5
3055	brz,pn	%g5, tsb_shme_32M
3056	  nop
30574:
3058	/*
3059	 * 4M hash
3060	 */
3061	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3062		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_shme_l4M, tsb_shme_checktte,
3063		sfmmu_suspend_tl, tsb_shme_32M)
3064	/* NOT REACHED */
3065
3066tsb_shme_32M:
3067	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3068	and	%g4, HAT_32M_FLAG, %g5
3069	brz,pn	%g5, tsb_shme_256M
3070	  nop
3071
3072	/*
3073	 * 32M hash
3074	 */
3075
3076	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3077		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_shme_l32M, tsb_shme_checktte,
3078		sfmmu_suspend_tl, tsb_shme_256M)
3079	/* NOT REACHED */
3080
3081tsb_shme_256M:
3082	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3083	and	%g4, HAT_256M_FLAG, %g5
3084	brz,pn	%g5, tsb_pagefault
3085	  nop
3086
3087	/*
3088	 * 256M hash
3089	 */
3090
3091	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3092	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_shme_l256M, tsb_shme_checktte,
3093	    sfmmu_suspend_tl, tsb_pagefault)
3094	/* NOT REACHED */
3095
3096tsb_shme_checktte:
3097
3098	brgez,pn %g3, tsb_pagefault
3099	  rdpr	%tt, %g7
3100	/*
3101	 * g1 = ctx1 flag
3102	 * g3 = tte
3103	 * g4 = tte pa
3104	 * g6 = tsbmiss area
3105	 * g7 = tt
3106	 */
3107
3108	brz,pt  %g1, tsb_validtte
3109	  nop
3110	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
3111	or	%g1, HAT_CHKCTX1_FLAG, %g1
3112	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
3113
3114	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
3115#endif /* sun4u && !UTSB_PHYS */
3116
3117tsb_validtte:
3118	/*
3119	 * g3 = tte
3120	 * g4 = tte pa
3121	 * g6 = tsbmiss area
3122	 * g7 = tt
3123	 */
3124
3125	/*
3126	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
3127	 */
3128	cmp	%g7, FAST_PROT_TT
3129	bne,pt	%icc, 4f
3130	  nop
3131
3132	TTE_SET_REFMOD_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_refmod,
3133	    tsb_protfault)
3134
3135	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3136#ifdef sun4v
3137	MMU_FAULT_STATUS_AREA(%g7)
3138	ldx	[%g7 + MMFSA_D_ADDR], %g5	/* load fault addr for later */
3139#else /* sun4v */
3140	mov     MMU_TAG_ACCESS, %g5
3141	ldxa    [%g5]ASI_DMMU, %g5
3142#endif /* sun4v */
3143	ba,pt	%xcc, tsb_update_tl1
3144	  nop
31454:
3146	/*
3147	 * If this is an ITLB miss, check the exec bit.
3148	 * If it is not set, treat this as an invalid TTE.
3149	 */
3150	cmp     %g7, T_INSTR_MMU_MISS
3151	be,pn	%icc, 5f
3152	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
3153	cmp     %g7, FAST_IMMU_MISS_TT
3154	bne,pt %icc, 3f
3155	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
31565:
3157	bz,pn %icc, tsb_protfault
3158	  nop
3159
31603:
3161	/*
3162	 * Set reference bit if not already set
3163	 */
3164	TTE_SET_REF_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_ref)
3165
3166	/*
3167	 * Now, load into TSB/TLB.  At this point:
3168	 * g3 = tte
3169	 * g4 = patte
3170	 * g6 = tsbmiss area
3171	 */
3172	rdpr	%tt, %g7
3173#ifdef sun4v
3174	MMU_FAULT_STATUS_AREA(%g2)
3175	cmp	%g7, T_INSTR_MMU_MISS
3176	be,a,pt	%icc, 9f
3177	  nop
3178	cmp	%g7, FAST_IMMU_MISS_TT
3179	be,a,pt	%icc, 9f
3180	  nop
3181	add	%g2, MMFSA_D_, %g2
31829:
3183	ldx	[%g2 + MMFSA_CTX_], %g7
3184	sllx	%g7, TTARGET_CTX_SHIFT, %g7
3185	ldx	[%g2 + MMFSA_ADDR_], %g2
3186	mov	%g2, %g5		! load the fault addr for later use
3187	srlx	%g2, TTARGET_VA_SHIFT, %g2
3188	or	%g2, %g7, %g2
3189#else /* sun4v */
3190	mov     MMU_TAG_ACCESS, %g5
3191	cmp     %g7, FAST_IMMU_MISS_TT
3192	be,a,pt %icc, 9f
3193	   ldxa  [%g0]ASI_IMMU, %g2
3194	ldxa    [%g0]ASI_DMMU, %g2
3195	ba,pt   %icc, tsb_update_tl1
3196	   ldxa  [%g5]ASI_DMMU, %g5
31979:
3198	ldxa    [%g5]ASI_IMMU, %g5
3199#endif /* sun4v */
3200
3201tsb_update_tl1:
3202	srlx	%g2, TTARGET_CTX_SHIFT, %g7
3203	brz,pn	%g7, tsb_kernel
3204#ifdef sun4v
3205	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
3206#else  /* sun4v */
3207	  srlx	%g3, TTE_SZ_SHFT, %g7
3208#endif /* sun4v */
3209
3210tsb_user:
3211#ifdef sun4v
3212	cmp	%g7, TTE4M
3213	bge,pn	%icc, tsb_user4m
3214	  nop
3215#else /* sun4v */
3216	cmp	%g7, TTESZ_VALID | TTE4M
3217	be,pn	%icc, tsb_user4m
3218	  srlx	%g3, TTE_SZ2_SHFT, %g7
3219	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
3220#ifdef ITLB_32M_256M_SUPPORT
3221	bnz,pn	%icc, tsb_user4m
3222	  nop
3223#else /* ITLB_32M_256M_SUPPORT */
3224	bnz,a,pn %icc, tsb_user_pn_synth
3225	 nop
3226#endif /* ITLB_32M_256M_SUPPORT */
3227#endif /* sun4v */
3228
3229tsb_user8k:
3230#if defined(sun4v) || defined(UTSB_PHYS)
3231	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3232	and	%g7, HAT_CHKCTX1_FLAG, %g1
3233	brz,a,pn %g1, 1f
3234	  ldn	[%g6 + TSBMISS_TSBPTR], %g1		! g1 = 1ST TSB ptr
3235	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
3236	brlz,a,pn %g1, ptl1_panic			! if no shared 3RD tsb
3237	  mov PTL1_NO_SCDTSB8K, %g1			! panic
3238        GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
32391:
3240#else /* defined(sun4v) || defined(UTSB_PHYS) */
3241	ldn   [%g6 + TSBMISS_TSBPTR], %g1             ! g1 = 1ST TSB ptr
3242#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3243
3244#ifndef UTSB_PHYS
3245	mov	ASI_N, %g7	! user TSBs accessed by VA
3246	mov	%g7, %asi
3247#endif /* !UTSB_PHYS */
3248
3249	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l3)
3250
3251	rdpr    %tt, %g5
3252#ifdef sun4v
3253	cmp	%g5, T_INSTR_MMU_MISS
3254	be,a,pn	%xcc, 9f
3255	  mov	%g3, %g5
3256#endif /* sun4v */
3257	cmp	%g5, FAST_IMMU_MISS_TT
3258	be,pn	%xcc, 9f
3259	  mov	%g3, %g5
3260
3261	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3262	! trapstat wants TTE in %g5
3263	retry
32649:
3265	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3266	! trapstat wants TTE in %g5
3267	retry
3268
3269tsb_user4m:
3270#if defined(sun4v) || defined(UTSB_PHYS)
3271	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3272	and	%g7, HAT_CHKCTX1_FLAG, %g1
3273	brz,a,pn %g1, 4f
3274	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = 2ND TSB ptr
3275	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)! g1 = 4TH TSB ptr
3276	brlz,a,pn %g1, 5f				! if no shared 4TH TSB
3277	  nop
3278        GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
3279
3280#else /* defined(sun4v) || defined(UTSB_PHYS) */
3281	ldn   [%g6 + TSBMISS_TSBPTR4M], %g1             ! g1 = 2ND TSB ptr
3282#endif /* defined(sun4v) || defined(UTSB_PHYS) */
32834:
3284	brlz,pn %g1, 5f	/* Check to see if we have 2nd TSB programmed */
3285	  nop
3286
3287#ifndef UTSB_PHYS
3288	mov	ASI_N, %g7	! user TSBs accessed by VA
3289	mov	%g7, %asi
3290#endif /* UTSB_PHYS */
3291
3292        TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l4)
3293
32945:
3295	rdpr    %tt, %g5
3296#ifdef sun4v
3297        cmp     %g5, T_INSTR_MMU_MISS
3298        be,a,pn %xcc, 9f
3299          mov   %g3, %g5
3300#endif /* sun4v */
3301        cmp     %g5, FAST_IMMU_MISS_TT
3302        be,pn   %xcc, 9f
3303        mov     %g3, %g5
3304
3305        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3306        ! trapstat wants TTE in %g5
3307        retry
33089:
3309        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3310        ! trapstat wants TTE in %g5
3311        retry
3312
3313#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3314	/*
3315	 * Panther ITLB synthesis.
3316	 * The Panther 32M and 256M ITLB code simulates these two large page
3317	 * sizes with 4M pages, to provide support for programs, for example
3318	 * Java, that may copy instructions into a 32M or 256M data page and
3319	 * then execute them. The code below generates the 4M pfn bits and
3320	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3321	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3322	 * are ignored by the hardware.
3323	 *
3324	 * Now, load into TSB/TLB.  At this point:
3325	 * g2 = tagtarget
3326	 * g3 = tte
3327	 * g4 = patte
3328	 * g5 = tt
3329	 * g6 = tsbmiss area
3330	 */
3331tsb_user_pn_synth:
3332	rdpr %tt, %g5
3333	cmp    %g5, FAST_IMMU_MISS_TT
3334	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
3335	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
3336	bz,pn %icc, 4b				/* if not, been here before */
3337	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
3338	brlz,a,pn %g1, 5f			/* no 2nd tsb */
3339	  mov	%g3, %g5
3340
3341	mov	MMU_TAG_ACCESS, %g7
3342	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
3343	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
3344
3345	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3346	mov	%g7, %asi
3347	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l5) /* update TSB */
33485:
3349        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3350        retry
3351
3352tsb_user_itlb_synth:
3353	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 =  2ND TSB */
3354
3355	mov	MMU_TAG_ACCESS, %g7
3356	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
3357	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
3358	brlz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
3359	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
3360
3361	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3362	mov	%g7, %asi
3363	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l6) /* update TSB */
33647:
3365	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
3366        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3367        retry
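
	/*
	 * Conceptually the synthesis amounts to the following C sketch.
	 * The exact bit insertion lives in GET_4M_PFN_OFF, which is not
	 * shown here, so treat this arithmetic as an assumption:
	 *
	 *	off = va & (pgsz - 1) & ~(MMU_PAGESIZE4M - 1);
	 *	tte_pfn |= off >> MMU_PAGESHIFT;  // fold in 4M-aligned
	 *					  // offset, in 8K units
	 *
	 * The DTLB ignores the extra pfn bits when the tte really maps
	 * a 32M/256M page; the ITLB path additionally rewrites the size
	 * field to 4M (SET_TTE4M_PN) so it can execute from the page.
	 */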
3368#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
3369
3370tsb_kernel:
3371	rdpr	%tt, %g5
3372#ifdef sun4v
3373	cmp	%g7, TTE4M
3374	bge,pn	%icc, 5f
3375#else
3376	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
3377	be,pn	%icc, 5f
3378#endif /* sun4v */
3379	  nop
3380	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8K TSB ptr
3381	ba,pt	%xcc, 6f
3382	  nop
33835:
3384	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4M TSB ptr
3385	brlz,pn	%g1, 3f		/* skip programming if 4M TSB ptr is -1 */
3386	  nop
33876:
3388#ifndef sun4v
3389tsb_kernel_patch_asi:
3390	or	%g0, RUNTIME_PATCH, %g6
3391	mov	%g6, %asi	! XXX avoid writing to %asi !!
3392#endif
3393	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l7)
33943:
3395#ifdef sun4v
3396	cmp	%g5, T_INSTR_MMU_MISS
3397	be,a,pn	%icc, 1f
3398	  mov	%g3, %g5			! trapstat wants TTE in %g5
3399#endif /* sun4v */
3400	cmp	%g5, FAST_IMMU_MISS_TT
3401	be,pn	%icc, 1f
3402	  mov	%g3, %g5			! trapstat wants TTE in %g5
3403	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3404	! trapstat wants TTE in %g5
3405	retry
34061:
3407	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3408	! trapstat wants TTE in %g5
3409	retry
3410
3411tsb_ism:
3412	/*
3413	 * This is an ISM [i|d]tlb miss.  We optimize for largest
3414	 * page size down to smallest.
3415	 *
3416	 * g2 = vaddr + ctx(or ctxtype (sun4v)) aka (pseudo-)tag access
3417	 *	register
3418	 * g3 = ismmap->ism_seg
3419	 * g4 = physical address of ismmap->ism_sfmmu
3420	 * g6 = tsbmiss area
3421	 */
3422	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
3423	brz,a,pn %g7, ptl1_panic		/* if zero jmp ahead */
3424	  mov	PTL1_BAD_ISM, %g1
3425						/* g5 = pa of imap_vb_shift */
3426	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3427	lduba	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
3428	srlx	%g3, %g4, %g3			/* clr size field */
3429	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
3430	sllx    %g3, %g4, %g3                   /* g3 = ism vbase */
3431	and     %g2, %g1, %g4                   /* g4 = ctx number */
3432	andn    %g2, %g1, %g1                   /* g1 = tlb miss vaddr */
3433	sub     %g1, %g3, %g2                   /* g2 = offset in ISM seg */
3434	or      %g2, %g4, %g2                   /* g2 = (pseudo-)tagacc */
3435	sub     %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5
3436	lduha   [%g5]ASI_MEM, %g4               /* g4 = imap_hatflags */
3437#if defined(sun4v) || defined(UTSB_PHYS)
3438	and     %g4, HAT_CTX1_FLAG, %g5         /* is HAT_CTX1_FLAG set? */
3439	brz,pt %g5, tsb_chk4M_ism
3440	  nop
3441	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g5
3442	or      %g5, HAT_CHKCTX1_FLAG, %g5
3443	stub    %g5, [%g6 + TSBMISS_URTTEFLAGS]
3444	rdpr    %tt, %g5
3445	SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
3446#endif /* defined(sun4v) || defined(UTSB_PHYS) */
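
	/*
	 * Net effect of the code above, in C (illustrative):
	 *
	 *	vbase = (ism_seg >> vb_shift) << vb_shift;  // seg base va
	 *	tagacc = (va - vbase) | ctx;	// offset within ISM seg
	 *	hatid = ism_hatid;	// resolve in the dummy ISM hat
	 *
	 * i.e. the miss is re-expressed as an offset into the ISM hat's
	 * address space before the hme hash is searched below.
	 */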
3447
3448	/*
3449	 * ISM pages are always locked down.
3450	 * If we can't find the tte then pagefault
3451	 * and let the spt segment driver resolve it.
3452	 *
3453	 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
3454	 * g4 = imap_hatflags
3455	 * g6 = tsb miss area
3456	 * g7 = ISM hatid
3457	 */
3458
3459tsb_chk4M_ism:
3460	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
3461	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3462	  nop
3463
3464tsb_ism_32M:
3465	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3466	brz,pn	%g5, tsb_ism_256M
3467	  nop
3468
3469	/*
3470	 * 32M hash.
3471	 */
3472
3473	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT32M,
3474	    TTE32M, %g5, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3475	    tsb_ism_4M)
3476	/* NOT REACHED */
3477
3478tsb_ism_32M_found:
3479	brlz,a,pt %g3, tsb_validtte
3480	  rdpr	%tt, %g7
3481	ba,pt	%xcc, tsb_ism_4M
3482	  nop
3483
3484tsb_ism_256M:
3485	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3486	brz,a,pn %g5, ptl1_panic
3487	  mov	PTL1_BAD_ISM, %g1
3488
3489	/*
3490	 * 256M hash.
3491	 */
3492	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT256M,
3493	    TTE256M, %g5, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3494	    tsb_ism_4M)
3495
3496tsb_ism_256M_found:
3497	brlz,a,pt %g3, tsb_validtte
3498	  rdpr	%tt, %g7
3499
3500tsb_ism_4M:
3501	/*
3502	 * 4M hash.
3503	 */
3504	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT4M,
3505	    TTE4M, %g5, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3506	    tsb_ism_8K)
3507	/* NOT REACHED */
3508
3509tsb_ism_4M_found:
3510	brlz,a,pt %g3, tsb_validtte
3511	  rdpr	%tt, %g7
3512
3513tsb_ism_8K:
3514	/*
3515	 * 8K and 64K hash.
3516	 */
3517
3518	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT64K,
3519	    TTE64K, %g5, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3520	    tsb_pagefault)
3521	/* NOT REACHED */
3522
3523tsb_ism_8K_found:
3524	brlz,a,pt %g3, tsb_validtte
3525	  rdpr	%tt, %g7
3526
3527tsb_pagefault:
3528	rdpr	%tt, %g7
3529	cmp	%g7, FAST_PROT_TT
3530	be,a,pn	%icc, tsb_protfault
3531	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3532
3533tsb_protfault:
3534	/*
3535	 * We get here if we couldn't find a valid tte in the hash.
3536	 *
3537	 * If user and we are at tl>1 we go to window handling code.
3538	 *
3539	 * If kernel and the fault is on the same page as our stack
3540	 * pointer, then we know the stack is bad and the trap handler
3541	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3542	 *
3543	 * If this is a kernel trap and tl>1, panic.
3544	 *
3545	 * Otherwise we call pagefault.
3546	 */
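	/*
	 * As a C sketch of the dispatch below (illustrative; the DTrace
	 * nofault checks at labels 2: and 4: are folded into the calls):
	 *
	 *	if (!kernel_ctx) {
	 *		if (tl > 1)
	 *			sfmmu_window_trap();
	 *		else
	 *			sfmmu_mmu_trap();	// user pagefault
	 *	} else if (pageof(sp + STACK_BIAS) == pageof(fault_va)) {
	 *		ptl1_panic(PTL1_BAD_STACK);
	 *	} else if (tl > 1) {
	 *		ptl1_panic(tt == FAST_PROT_TT ?
	 *		    PTL1_BAD_KPROT_FAULT : PTL1_BAD_KMISS);
	 *	} else {
	 *		sfmmu_pagefault();
	 *	}
	 */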
3547	cmp	%g7, FAST_IMMU_MISS_TT
3548#ifdef sun4v
3549	MMU_FAULT_STATUS_AREA(%g4)
3550	ldx	[%g4 + MMFSA_I_CTX], %g5
3551	ldx	[%g4 + MMFSA_D_CTX], %g4
3552	move	%icc, %g5, %g4
3553	cmp	%g7, T_INSTR_MMU_MISS
3554	move	%icc, %g5, %g4
3555#else
3556	mov	MMU_TAG_ACCESS, %g4
3557	ldxa	[%g4]ASI_DMMU, %g2
3558	ldxa	[%g4]ASI_IMMU, %g5
3559	move	%icc, %g5, %g2
3560	cmp	%g7, T_INSTR_MMU_MISS
3561	move	%icc, %g5, %g2
3562	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
3563#endif /* sun4v */
3564	brnz,pn	%g4, 3f				/* skip if not kernel */
3565	  rdpr	%tl, %g5
3566
3567	add	%sp, STACK_BIAS, %g3
3568	srlx	%g3, MMU_PAGESHIFT, %g3
3569	srlx	%g2, MMU_PAGESHIFT, %g4
3570	cmp	%g3, %g4
3571	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
3572	  mov	PTL1_BAD_STACK, %g1
3573
3574	cmp	%g5, 1
3575	ble,pt	%icc, 2f
3576	  nop
3577	TSTAT_CHECK_TL1(2f, %g1, %g2)
3578	rdpr	%tt, %g2
3579	cmp	%g2, FAST_PROT_TT
3580	mov	PTL1_BAD_KPROT_FAULT, %g1
3581	movne	%icc, PTL1_BAD_KMISS, %g1
3582	ba,pt	%icc, ptl1_panic
3583	  nop
3584
35852:
3586	/*
3587	 * We are taking a pagefault in the kernel on a kernel address.  If
3588	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
3589	 * want to call sfmmu_pagefault -- we will instead note that a fault
3590	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
3591	 * (instead of a "retry").  This will step over the faulting
3592	 * instruction.
3593	 */
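	/*
	 * A C sketch of this step (cpu_core, the flag names and
	 * cpuc_dtrace_illval are the real DTrace per-CPU fields;
	 * "fault_addr" is illustrative and the flow just paraphrases
	 * the assembly below):
	 *
	 *	cpu_core_t *cp = &cpu_core[CPU->cpu_id];
	 *
	 *	if (!(cp->cpuc_dtrace_flags & CPU_DTRACE_NOFAULT))
	 *		sfmmu_pagefault();
	 *	cp->cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *	cp->cpuc_dtrace_illval = fault_addr;
	 *	done;				(not retry)
	 */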
3594	CPU_INDEX(%g1, %g2)
3595	set	cpu_core, %g2
3596	sllx	%g1, CPU_CORE_SHIFT, %g1
3597	add	%g1, %g2, %g1
3598	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3599	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3600	bz	sfmmu_pagefault
3601	or	%g2, CPU_DTRACE_BADADDR, %g2
3602	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3603	GET_MMU_D_ADDR(%g3, %g4)
3604	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3605	done
3606
36073:
3608	cmp	%g5, 1
3609	ble,pt	%icc, 4f
3610	  nop
3611	TSTAT_CHECK_TL1(4f, %g1, %g2)
3612	ba,pt	%icc, sfmmu_window_trap
3613	  nop
3614
36154:
3616	/*
3617	 * We are taking a pagefault on a non-kernel address.  If we are in
3618	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3619	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3620	 */
3621	CPU_INDEX(%g1, %g2)
3622	set	cpu_core, %g2
3623	sllx	%g1, CPU_CORE_SHIFT, %g1
3624	add	%g1, %g2, %g1
3625	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3626	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3627	bz	sfmmu_mmu_trap
3628	or	%g2, CPU_DTRACE_BADADDR, %g2
3629	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3630	GET_MMU_D_ADDR(%g3, %g4)
3631	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3632
3633	/*
3634	 * Be sure that we're actually taking this miss from the kernel --
3635	 * otherwise we have managed to return to user-level with
3636	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3637	 */
3638	rdpr	%tstate, %g2
3639	btst	TSTATE_PRIV, %g2
3640	bz,a	ptl1_panic
3641	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3642	done
3643
3644	ALTENTRY(tsb_tl0_noctxt)
3645	/*
3646	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
3647	 * if it is, indicate that we have faulted and issue a done.
3648	 */
3649	CPU_INDEX(%g5, %g6)
3650	set	cpu_core, %g6
3651	sllx	%g5, CPU_CORE_SHIFT, %g5
3652	add	%g5, %g6, %g5
3653	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
3654	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
3655	bz	1f
3656	or	%g6, CPU_DTRACE_BADADDR, %g6
3657	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
3658	GET_MMU_D_ADDR(%g3, %g4)
3659	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
3660
3661	/*
3662	 * Be sure that we're actually taking this miss from the kernel --
3663	 * otherwise we have managed to return to user-level with
3664	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3665	 */
3666	rdpr	%tstate, %g5
3667	btst	TSTATE_PRIV, %g5
3668	bz,a	ptl1_panic
3669	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3670	TSTAT_CHECK_TL1(2f, %g1, %g2);
36712:
3672	done
3673
36741:
3675	rdpr	%tt, %g5
3676	cmp	%g5, FAST_IMMU_MISS_TT
3677#ifdef sun4v
3678	MMU_FAULT_STATUS_AREA(%g2)
3679	be,a,pt	%icc, 2f
3680	  ldx	[%g2 + MMFSA_I_CTX], %g3
3681	cmp	%g5, T_INSTR_MMU_MISS
3682	be,a,pt	%icc, 2f
3683	  ldx	[%g2 + MMFSA_I_CTX], %g3
3684	ldx	[%g2 + MMFSA_D_CTX], %g3
36852:
3686#else
3687	mov	MMU_TAG_ACCESS, %g2
3688	be,a,pt	%icc, 2f
3689	  ldxa	[%g2]ASI_IMMU, %g3
3690	ldxa	[%g2]ASI_DMMU, %g3
36912:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
3692#endif /* sun4v */
3693	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
3694	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
3695	rdpr	%tl, %g5
3696	cmp	%g5, 1
3697	ble,pt	%icc, sfmmu_mmu_trap
3698	  nop
3699	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3700	ba,pt	%icc, sfmmu_window_trap
3701	  nop
3702	SET_SIZE(sfmmu_tsb_miss)
3703
3704	/*
3705	 * This routine will look for a user or kernel vaddr in the hash
3706	 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't grab
3707	 * any locks.  It should only be used by other sfmmu routines.
3708	 */
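	/*
	 * C-level view of the interface, as a sketch (see hat_sfmmu.h
	 * for the authoritative prototype):
	 *
	 *	pfn_t
	 *	sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep);
	 *
	 * The tte is also copied out through ttep; a suspended tte is
	 * reported as PFN_SUSPENDED (-2), see tsb_suspend below.
	 */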
3709	ENTRY_NP(sfmmu_vatopfn)
3710 	/*
3711 	 * disable interrupts
3712 	 */
3713 	rdpr	%pstate, %o3
3714#ifdef DEBUG
3715	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
3716#endif
3717	/*
3718	 * disable interrupts to protect the TSBMISS area
3719	 */
3720	andn    %o3, PSTATE_IE, %o5
3721	wrpr    %o5, 0, %pstate
3722
3723	/*
3724	 * o0 = vaddr
3725	 * o1 = sfmmup
3726	 * o2 = ttep
3727	 */
3728	CPU_TSBMISS_AREA(%g1, %o5)
3729	ldn	[%g1 + TSBMISS_KHATID], %o4
3730	cmp	%o4, %o1
3731	bne,pn	%ncc, vatopfn_nokernel
3732	  mov	TTE64K, %g5			/* g5 = rehash # */
3733	mov	%g1, %o5			/* o5 = tsbmiss_area */
3734	/*
3735	 * o0 = vaddr
3736	 * o1 & o4 = hatid
3737	 * o2 = ttep
3738	 * o5 = tsbmiss area
3739	 */
3740	mov	HBLK_RANGE_SHIFT, %g6
37411:
3742
3743	/*
3744	 * o0 = vaddr
3745	 * o1 = sfmmup
3746	 * o2 = ttep
3747	 * o3 = old %pstate
3748	 * o4 = hatid
3749	 * o5 = tsbmiss
3750	 * g5 = rehash #
3751	 * g6 = hmeshift
3752	 *
3753	 * The first arg to GET_TTE is actually the tag access register,
3754	 * not just the vaddr. Since this call is for the kernel we need to
3755	 * clear any lower vaddr bits that would be interpreted as ctx bits.
3756	 */
3757	set     TAGACC_CTX_MASK, %g1
3758	andn    %o0, %g1, %o0
3759	GET_TTE(%o0, %o4, %g1, %g2, %o5, %g4, %g6, %g5, %g3,
3760		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
3761
3762kvtop_hblk_found:
3763	/*
3764	 * o0 = vaddr
3765	 * o1 = sfmmup
3766	 * o2 = ttep
3767	 * g1 = tte
3768	 * g2 = tte pa
3769	 * g3 = scratch
3770	 * o5 = tsbmiss area
3771	 * o1 = hat id
3772	 */
3773	brgez,a,pn %g1, 6f			/* if tte invalid goto 6f */
3774	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3775	stx	%g1, [%o2]			/* put tte into *ttep */
3776	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
3777	/*
3778	 * o0 = vaddr
3779	 * o1 = sfmmup
3780	 * o2 = ttep
3781	 * g1 = pfn
3782	 */
3783	ba,pt	%xcc, 6f
3784	  mov	%g1, %o0
3785
3786kvtop_nohblk:
3787	/*
3788	 * We get here if we couldn't find a valid hblk in the hash.  We rehash
3789	 * if necessary.
3790	 */
3791	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
3792#ifdef sun4v
3793	cmp	%g5, MAX_HASHCNT
3794#else
3795	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
3796#endif /* sun4v */
3797	be,a,pn	%icc, 6f
3798	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3799	mov	%o1, %o4			/* restore hatid */
3800#ifdef sun4v
3801        add	%g5, 2, %g5
3802	cmp	%g5, 3
3803	move	%icc, MMU_PAGESHIFT4M, %g6
3804	ba,pt	%icc, 1b
3805	movne	%icc, MMU_PAGESHIFT256M, %g6
3806#else
3807        inc	%g5
3808	cmp	%g5, 2
3809	move	%icc, MMU_PAGESHIFT512K, %g6
3810	ba,pt	%icc, 1b
3811	movne	%icc, MMU_PAGESHIFT4M, %g6
3812#endif /* sun4v */
38136:
3814	retl
3815 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3816
3817tsb_suspend:
3818	/*
3819	 * o0 = vaddr
3820	 * o1 = sfmmup
3821	 * o2 = ttep
3822	 * g1 = tte
3823	 * g2 = tte pa
3824	 * g3 = tte va
3825	 * o5 = tsbmiss area (use o5 instead of o2, which holds ttep)
3826	 */
3827	stx	%g1, [%o2]			/* put tte into *ttep */
3828	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
3829	  sub	%g0, 1, %o0			/* output = PFN_INVALID */
3830	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
38318:
3832	retl
3833	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
3834
3835vatopfn_nokernel:
3836	/*
3837	 * This routine does NOT support user addresses.
3838	 * There is a routine in C that supports this.
3839	 * The only reason the C routine does not support
3840	 * kernel addresses as well is that we do va_to_pa
3841	 * while holding the hashlock.
3842	 */
3843 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3844	save	%sp, -SA(MINFRAME), %sp
3845	sethi	%hi(sfmmu_panic3), %o0
3846	call	panic
3847	 or	%o0, %lo(sfmmu_panic3), %o0
3848
3849	SET_SIZE(sfmmu_vatopfn)
3850
3851	/*
3852	 * %o0 = vaddr
3853	 * %o1 = hashno (aka szc)
3854	 *
3855	 *
3856	 * This routine is similar to sfmmu_vatopfn() but will only look for
3857	 * a kernel vaddr in the hash structure for the specified rehash value.
3858	 * It's just an optimization for the case when pagesize for a given
3859	 * va range is already known (e.g. large page heap) and we don't want
3860	 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
3861	 *
3862	 * Returns valid pfn or PFN_INVALID if
3863	 * tte for specified rehash # is not found, invalid or suspended.
3864	 */
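	/*
	 * The hmeshift for a given rehash value is computed inline below;
	 * in C it would be (a sketch matching the three instructions that
	 * follow the TSBMISS_KHATID load):
	 *
	 *	uint_t
	 *	hmeshift(uint_t hashno)
	 *	{
	 *		return (MMU_PAGESHIFT + 3 * hashno);
	 *	}
	 *
	 * e.g. hashno 1 -> 16 (64K range), hashno 3 -> 22 (4M range).
	 */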
3865	ENTRY_NP(sfmmu_kvaszc2pfn)
3866 	/*
3867 	 * disable interrupts
3868 	 */
3869 	rdpr	%pstate, %o3
3870#ifdef DEBUG
3871	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
3872#endif
3873	/*
3874	 * disable interrupts to protect the TSBMISS area
3875	 */
3876	andn    %o3, PSTATE_IE, %o5
3877	wrpr    %o5, 0, %pstate
3878
3879	CPU_TSBMISS_AREA(%g1, %o5)
3880	ldn	[%g1 + TSBMISS_KHATID], %o4
3881	sll	%o1, 1, %g6			/* %g6 = 2 * hashno */
3882	add	%g6, %o1, %g6			/* %g6 = 3 * hashno */
3883	add	%g6, MMU_PAGESHIFT, %g6		/* hmeshift = MMU_PAGESHIFT + 3 * hashno */
3884	/*
3885	 * %o0 = vaddr
3886	 * %o1 = hashno
3887	 * %o3 = old %pstate
3888	 * %o4 = ksfmmup
3889	 * %g1 = tsbmiss area
3890	 * %g6 = hmeshift
3891	 */
3892
3893	/*
3894	 * The first arg to GET_TTE is actually the tag access register,
3895	 * not just the vaddr. Since this call is for the kernel we need to
3896	 * clear any lower vaddr bits that would be interpreted as ctx bits.
3897	 */
3898	srlx	%o0, MMU_PAGESHIFT, %o0
3899	sllx	%o0, MMU_PAGESHIFT, %o0
3900	GET_TTE(%o0, %o4, %g3, %g4, %g1, %o5, %g6, %o1, %g5,
3901		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
3902		kvaszc2pfn_nohblk)
3903
3904kvaszc2pfn_hblk_found:
3905	/*
3906	 * %g3 = tte
3907	 * %o0 = vaddr
3908	 */
3909	brgez,a,pn %g3, 1f			/* check if tte is invalid */
3910	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3911	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
3912	/*
3913	 * g3 = pfn
3914	 */
3915	ba,pt	%xcc, 1f
3916	  mov	%g3, %o0
3917
3918kvaszc2pfn_nohblk:
3919	mov	-1, %o0
3920
39211:
3922	retl
3923 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3924
3925	SET_SIZE(sfmmu_kvaszc2pfn)
3926
3927
3928
3929/*
3930 * kpm lock used between trap level tsbmiss handler and kpm C level.
3931 */
3932#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
3933	mov     0xff, tmp1						;\
3934label1:									;\
3935	casa    [kpmlckp]asi, %g0, tmp1					;\
3936	brnz,pn tmp1, label1						;\
3937	mov     0xff, tmp1						;\
3938	membar  #LoadLoad
3939
3940#define KPMLOCK_EXIT(kpmlckp, asi)					\
3941	membar  #LoadStore|#StoreStore					;\
3942	sta     %g0, [kpmlckp]asi
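
/*
 * A minimal C sketch of the two macros above, assuming the lock cell is a
 * 32-bit word holding 0 when free and 0xff when held (atomic_cas_32 and
 * the membar wrappers are the generic illumos primitives, shown purely
 * for illustration):
 *
 *	static void
 *	kpmlock_enter(volatile uint32_t *lp)
 *	{
 *		while (atomic_cas_32(lp, 0, 0xff) != 0)
 *			continue;		(spin until acquired)
 *		membar_consumer();		(#LoadLoad)
 *	}
 *
 *	static void
 *	kpmlock_exit(volatile uint32_t *lp)
 *	{
 *		membar_exit();			(#LoadStore|#StoreStore)
 *		*lp = 0;
 *	}
 */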
3943
3944/*
3945 * Lookup a memseg for a given pfn and if found, return the physical
3946 * address of the corresponding struct memseg in mseg, otherwise
3947 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
3948 * tsbmp, %asi is assumed to be ASI_MEM.
3949 * This lookup is done by strictly traversing only the physical memseg
3950 * linkage. The more generic approach, to check the virtual linkage
3951 * before using the physical (used e.g. with hmehash buckets), cannot
3952 * be used here. Memory DR operations can run in parallel to this
3953	 * lookup without any locks, and updates of the physical and virtual
3954	 * linkage cannot be done atomically with respect to each other. Because
3955	 * physical address zero can be a valid physical address, MSEG_NULLPTR_PA
3956	 * acts as the "physical NULL" pointer.
3957 */
3958#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
3959	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
3960	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
3961	udivx	pfn, mseg, mseg						;\
3962	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
3963	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
3964	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
3965	add	tmp1, mseg, tmp1					;\
3966	ldxa	[tmp1]%asi, mseg					;\
3967	cmp	mseg, MSEG_NULLPTR_PA					;\
3968	be,pn	%xcc, label##1		/* if not found */	;\
3969	  nop								;\
3970	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3971	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3972	blu,pn	%xcc, label##1					;\
3973	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3974	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3975	bgeu,pn	%xcc, label##1					;\
3976	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
3977	mulx	tmp1, PAGE_SIZE, tmp1					;\
3978	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
3979	add	tmp2, tmp1, tmp1			/* pp */	;\
3980	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
3981	cmp	tmp2, pfn						;\
3982	be,pt	%xcc, label##_ok			/* found */	;\
3983label##1:								;\
3984	/* brute force lookup */					;\
3985	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
3986	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
3987label##2:								;\
3988	cmp	mseg, MSEG_NULLPTR_PA					;\
3989	be,pn	%xcc, label##_ok		/* if not found */	;\
3990	  nop								;\
3991	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3992	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3993	blu,a,pt %xcc, label##2					;\
3994	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3995	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3996	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3997	bgeu,a,pt %xcc, label##2					;\
3998	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3999label##_ok:
4000
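/*
 * In C, the lookup above is approximately (a sketch; "pa_read" and
 * "pfn_in_mseg" stand in for the ldxa/ASI_MEM accesses and range checks,
 * and the page-identity check done on a hashed hit is omitted for
 * brevity):
 *
 *	uint64_t
 *	page_num2memseg_nolock_pa(pfn_t pfn)
 *	{
 *		uint64_t mseg;
 *		uint_t inx = (pfn / mhash_per_slot) &
 *		    (SFMMU_N_MEM_SLOTS - 1);
 *
 *		mseg = pa_read(msegphashpa +
 *		    (inx << SFMMU_MEM_HASH_ENTRY_SHIFT));
 *		if (mseg != MSEG_NULLPTR_PA && pfn_in_mseg(pfn, mseg))
 *			return (mseg);
 *		for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA;	(brute force)
 *		    mseg = pa_read(mseg + MEMSEG_NEXTPA))
 *			if (pfn_in_mseg(pfn, mseg))
 *				break;
 *		return (mseg);		(MSEG_NULLPTR_PA if not found)
 *	}
 */
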
4001	/*
4002	 * kpm tsb miss handler large pages
4003	 * g1 = 8K kpm TSB entry pointer
4004	 * g2 = tag access register
4005	 * g3 = 4M kpm TSB entry pointer
4006	 */
4007	ALTENTRY(sfmmu_kpm_dtsb_miss)
4008	TT_TRACE(trace_tsbmiss)
4009
4010	CPU_INDEX(%g7, %g6)
4011	sethi	%hi(kpmtsbm_area), %g6
4012	sllx	%g7, KPMTSBM_SHIFT, %g7
4013	or	%g6, %lo(kpmtsbm_area), %g6
4014	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4015
4016	/* check enable flag */
4017	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4018	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4019	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4020	  nop
4021
4022	/* VA range check */
4023	ldx	[%g6 + KPMTSBM_VBASE], %g7
4024	cmp	%g2, %g7
4025	blu,pn	%xcc, sfmmu_tsb_miss
4026	  ldx	[%g6 + KPMTSBM_VEND], %g5
4027	cmp	%g2, %g5
4028	bgeu,pn	%xcc, sfmmu_tsb_miss
4029	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
4030
4031	/*
4032	 * check TL tsbmiss handling flag
4033	 * bump tsbmiss counter
4034	 */
4035	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4036#ifdef	DEBUG
4037	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
4038	inc	%g5
4039	brz,pn	%g3, sfmmu_kpm_exception
4040	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4041#else
4042	inc	%g5
4043	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4044#endif
4045	/*
4046	 * At this point:
4047	 *  g1 = 8K kpm TSB pointer (not used)
4048	 *  g2 = tag access register
4049	 *  g3 = clobbered
4050	 *  g6 = per-CPU kpm tsbmiss area
4051	 *  g7 = kpm_vbase
4052	 */
4053
4054	/* vaddr2pfn */
4055	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4056	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4057	srax    %g4, %g3, %g2			/* which alias range (r) */
4058	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4059	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4060
4061	/*
4062	 * Setup %asi
4063	 * mseg_pa = page_numtomemseg_nolock(pfn)
4064	 * if (mseg_pa == NULL) sfmmu_kpm_exception
4065	 * g2=pfn
4066	 */
4067	mov	ASI_MEM, %asi
4068	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
4069	cmp	%g3, MSEG_NULLPTR_PA
4070	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4071	  nop
4072
4073	/*
4074	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
4075	 * g2=pfn g3=mseg_pa
4076	 */
4077	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
4078	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4079	srlx	%g2, %g5, %g4
4080	sllx	%g4, %g5, %g4
4081	sub	%g4, %g7, %g4
4082	srlx	%g4, %g5, %g4
4083
4084	/*
4085	 * Validate inx value
4086	 * g2=pfn g3=mseg_pa g4=inx
4087	 */
4088#ifdef	DEBUG
4089	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4090	cmp	%g4, %g5			/* inx - nkpmpgs */
4091	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4092	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4093#else
4094	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4095#endif
4096	/*
4097	 * kp = &mseg_pa->kpm_pages[inx]
4098	 */
4099	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
4100	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
4101	add	%g5, %g4, %g5			/* kp */
4102
4103	/*
4104	 * KPMP_HASH(kp)
4105	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
4106	 */
4107	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4108	sub	%g7, 1, %g7			/* mask */
4109	srlx	%g5, %g1, %g1			/* x = kp >> kpmp_shift */
4110	add	%g5, %g1, %g5			/* y = kp + x */
4111	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4112
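	/*
	 * The hash index just computed is, in C (a sketch of the
	 * KPMP_HASH computation):
	 *
	 *	hashinx = ((uintptr_t)kp + ((uintptr_t)kp >> kpmp_shift)) &
	 *	    (kpmp_table_sz - 1);
	 */
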
4113	/*
4114	 * Calculate physical kpm_page pointer
4115	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4116	 */
4117	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
4118	add	%g1, %g4, %g1			/* kp_pa */
4119
4120	/*
4121	 * Calculate physical hash lock address
4122	 * g1=kp_pa g2=pfn g5=hashinx
4123	 */
4124	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
4125	sllx	%g5, KPMHLK_SHIFT, %g5
4126	add	%g4, %g5, %g3
4127	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
4128
4129	/*
4130	 * Assemble tte
4131	 * g1=kp_pa g2=pfn g3=hlck_pa
4132	 */
4133#ifdef sun4v
4134	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4135	sllx	%g5, 32, %g5
4136	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4137	or	%g4, TTE4M, %g4
4138	or	%g5, %g4, %g5
4139#else
4140	sethi	%hi(TTE_VALID_INT), %g4
4141	mov	TTE4M, %g5
4142	sllx	%g5, TTE_SZ_SHFT_INT, %g5
4143	or	%g5, %g4, %g5			/* upper part */
4144	sllx	%g5, 32, %g5
4145	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4146	or	%g5, %g4, %g5
4147#endif
4148	sllx	%g2, MMU_PAGESHIFT, %g4
4149	or	%g5, %g4, %g5			/* tte */
4150	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4151	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4152
4153	/*
4154	 * tsb dropin
4155	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
4156	 */
4157
4158	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4159	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
4160
4161	/* use C-handler if there's no go for dropin */
4162	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
4163	cmp	%g7, -1
4164	bne,pn	%xcc, 5f	/* use C-handler if there's no go for dropin */
4165	  nop
4166
4167#ifdef	DEBUG
4168	/* double check refcnt */
4169	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
4170	brz,pn	%g7, 5f			/* let C-handler deal with this */
4171	  nop
4172#endif
4173
4174#ifndef sun4v
4175	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4176	mov	ASI_N, %g1
4177	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4178	movnz	%icc, ASI_MEM, %g1
4179	mov	%g1, %asi
4180#endif
4181
4182	/*
4183	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4184	 * If we fail to lock the TSB entry then just load the tte into the
4185	 * TLB.
4186	 */
4187	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l1)
4188
4189	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4190	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4191locked_tsb_l1:
4192	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
4193
4194	/* KPMLOCK_EXIT(kpmlckp, asi) */
4195	KPMLOCK_EXIT(%g3, ASI_MEM)
4196
4197	/*
4198	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4199	 * point to trapstat's TSB miss return code (note that trapstat
4200	 * itself will patch the correct offset to add).
4201	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4202	 */
4203	rdpr	%tl, %g7
4204	cmp	%g7, 1
4205	ble	%icc, 0f
4206	sethi	%hi(KERNELBASE), %g6
4207	rdpr	%tpc, %g7
4208	or	%g6, %lo(KERNELBASE), %g6
4209	cmp	%g7, %g6
4210	bgeu	%xcc, 0f
4211	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
4212	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4213	wrpr	%g7, %tpc
4214	add	%g7, 4, %g7
4215	wrpr	%g7, %tnpc
42160:
4217	retry
42185:
4219	/* g3=hlck_pa */
4220	KPMLOCK_EXIT(%g3, ASI_MEM)
4221	ba,pt	%icc, sfmmu_kpm_exception
4222	  nop
4223	SET_SIZE(sfmmu_kpm_dtsb_miss)
4224
4225	/*
4226	 * kpm tsbmiss handler for smallpages
4227	 * g1 = 8K kpm TSB pointer
4228	 * g2 = tag access register
4229	 * g3 = 4M kpm TSB pointer
4230	 */
4231	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
4232	TT_TRACE(trace_tsbmiss)
4233	CPU_INDEX(%g7, %g6)
4234	sethi	%hi(kpmtsbm_area), %g6
4235	sllx	%g7, KPMTSBM_SHIFT, %g7
4236	or	%g6, %lo(kpmtsbm_area), %g6
4237	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4238
4239	/* check enable flag */
4240	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4241	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4242	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4243	  nop
4244
4245	/*
4246	 * VA range check
4247	 * On fail: goto sfmmu_tsb_miss
4248	 */
4249	ldx	[%g6 + KPMTSBM_VBASE], %g7
4250	cmp	%g2, %g7
4251	blu,pn	%xcc, sfmmu_tsb_miss
4252	  ldx	[%g6 + KPMTSBM_VEND], %g5
4253	cmp	%g2, %g5
4254	bgeu,pn	%xcc, sfmmu_tsb_miss
4255	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
4256
4257	/*
4258	 * check TL tsbmiss handling flag
4259	 * bump tsbmiss counter
4260	 */
4261	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4262#ifdef	DEBUG
4263	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
4264	inc	%g5
4265	brz,pn	%g1, sfmmu_kpm_exception
4266	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4267#else
4268	inc	%g5
4269	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4270#endif
4271	/*
4272	 * At this point:
4273	 *  g1 = clobbered
4274	 *  g2 = tag access register
4275	 *  g3 = 4M kpm TSB pointer (not used)
4276	 *  g6 = per-CPU kpm tsbmiss area
4277	 *  g7 = kpm_vbase
4278	 */
4279
4280	/*
4281	 * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
4282	 * which is defined in mach_kpm.h. Any changes in that macro
4283	 * should also be ported back to this assembly code.
4284	 */
4285	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3	/* g3 = kpm_size_shift */
4286	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4287	srax    %g4, %g3, %g7			/* which alias range (r) */
4288	brz,pt	%g7, 2f
4289	  sethi   %hi(vac_colors_mask), %g5
4290	ld	[%g5 + %lo(vac_colors_mask)], %g5
4291
4292	srlx	%g2, MMU_PAGESHIFT, %g1		/* vaddr >> MMU_PAGESHIFT */
4293	and	%g1, %g5, %g1			/* g1 = v */
4294	sllx	%g7, %g3, %g5			/* g5 = r << kpm_size_shift */
4295	cmp	%g7, %g1			/* if (r > v) */
4296	bleu,pn %xcc, 1f
4297	  sub   %g4, %g5, %g4			/* paddr -= r << kpm_size_shift */
4298	sub	%g7, %g1, %g5			/* g5 = r - v */
4299	sllx	%g5, MMU_PAGESHIFT, %g7		/* (r-v) << MMU_PAGESHIFT */
4300	add	%g4, %g7, %g4			/* paddr += (r-v)<<MMU_PAGESHIFT */
4301	ba	2f
4302	  nop
43031:
4304	sllx	%g7, MMU_PAGESHIFT, %g5		/* else */
4305	sub	%g4, %g5, %g4			/* paddr -= r << MMU_PAGESHIFT */
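
	/*
	 * The above, restated as a C sketch of SFMMU_KPM_VTOP (the
	 * authoritative macro lives in mach_kpm.h):
	 *
	 *	paddr = vaddr - kpm_vbase;
	 *	r = paddr >> kpm_size_shift;		(alias range)
	 *	if (r != 0) {
	 *		v = (vaddr >> MMU_PAGESHIFT) & vac_colors_mask;
	 *		paddr -= r << kpm_size_shift;
	 *		if (r > v)
	 *			paddr += (r - v) << MMU_PAGESHIFT;
	 *		else
	 *			paddr -= r << MMU_PAGESHIFT;
	 *	}
	 */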
4306
4307	/*
4308	 * paddr2pfn
4309	 *  g1 = vcolor (not used)
4310	 *  g2 = tag access register
4311	 *  g3 = clobbered
4312	 *  g4 = paddr
4313	 *  g5 = clobbered
4314	 *  g6 = per-CPU kpm tsbmiss area
4315	 *  g7 = clobbered
4316	 */
43172:
4318	srlx	%g4, MMU_PAGESHIFT, %g2		/* g2 = pfn */
4319
4320	/*
4321	 * Setup %asi
4322	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
4323	 * if (mseg not found) sfmmu_kpm_exception
4324	 * g2=pfn g6=per-CPU kpm tsbmiss area
4325	 * g4 g5 g7 for scratch use.
4326	 */
4327	mov	ASI_MEM, %asi
4328	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
4329	cmp	%g3, MSEG_NULLPTR_PA
4330	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4331	  nop
4332
4333	/*
4334	 * inx = pfn - mseg_pa->kpm_pbase
4335	 * g2=pfn  g3=mseg_pa  g6=per-CPU kpm tsbmiss area
4336	 */
4337	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4338	sub	%g2, %g7, %g4
4339
4340#ifdef	DEBUG
4341	/*
4342	 * Validate inx value
4343	 * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
4344	 */
4345	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4346	cmp	%g4, %g5			/* inx - nkpmpgs */
4347	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4348	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4349#else
4350	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4351#endif
4352	/* ksp = &mseg_pa->kpm_spages[inx] */
4353	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
4354	add	%g5, %g4, %g5			/* ksp */
4355
4356	/*
4357	 * KPMP_SHASH(kp)
4358	 * g2=pfn g3=mseg_pa g4=inx g5=ksp
4359	 * g6=per-CPU kpm tsbmiss area  g7=kpmp_stable_sz
4360	 */
4361	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4362	sub	%g7, 1, %g7			/* mask */
4363	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
4364	add	%g5, %g1, %g5			/* y = ksp + x */
4365	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4366
4367	/*
4368	 * Calculate physical kpm_spage pointer
4369	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4370	 * g6=per-CPU kpm tsbmiss area
4371	 */
4372	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
4373	add	%g1, %g4, %g1			/* ksp_pa */
4374
4375	/*
4376	 * Calculate physical hash lock address.
4377	 * Note: Changes in kpm_shlk_t must be reflected here.
4378	 * g1=ksp_pa g2=pfn g5=hashinx
4379	 * g6=per-CPU kpm tsbmiss area
4380	 */
4381	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
4382	sllx	%g5, KPMSHLK_SHIFT, %g5
4383	add	%g4, %g5, %g3			/* hlck_pa */
4384
4385	/*
4386	 * Assemble non-cacheable tte initially
4387	 * g1=ksp_pa g2=pfn g3=hlck_pa
4388	 * g6=per-CPU kpm tsbmiss area
4389	 */
4390	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4391	sllx	%g5, 32, %g5
4392	mov	(TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4393	or	%g5, %g4, %g5
4394	sllx	%g2, MMU_PAGESHIFT, %g4
4395	or	%g5, %g4, %g5			/* tte */
4396	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4397	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4398
4399	/*
4400	 * tsb dropin
4401	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
4402	 * g6=per-CPU kpm tsbmiss area  g7=scratch register
4403	 */
4404
4405	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4406	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
4407
4408	/* use C-handler if there's no go for dropin */
4409	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7	/* kp_mapped */
4410	andcc	%g7, KPM_MAPPED_GO, %g0			/* go or no go ? */
4411	bz,pt	%icc, 5f				/* no go */
4412	  nop
4413	and	%g7, KPM_MAPPED_MASK, %g7		/* go */
4414	cmp	%g7, KPM_MAPPEDS			/* cacheable ? */
4415	be,a,pn	%xcc, 3f
4416	  or	%g5, TTE_CV_INT, %g5			/* cacheable */
44173:
4418#ifndef sun4v
4419	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4420	mov	ASI_N, %g1
4421	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4422	movnz	%icc, ASI_MEM, %g1
4423	mov	%g1, %asi
4424#endif
4425
4426	/*
4427	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4428	 * If we fail to lock the TSB entry then just load the tte into the
4429	 * TLB.
4430	 */
4431	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l2)
4432
4433	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4434	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4435locked_tsb_l2:
4436	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
4437
4438	/* KPMLOCK_EXIT(kpmlckp, asi) */
4439	KPMLOCK_EXIT(%g3, ASI_MEM)
4440
4441	/*
4442	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4443	 * point to trapstat's TSB miss return code (note that trapstat
4444	 * itself will patch the correct offset to add).
4445	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4446	 */
4447	rdpr	%tl, %g7
4448	cmp	%g7, 1
4449	ble	%icc, 0f
4450	sethi	%hi(KERNELBASE), %g6
4451	rdpr	%tpc, %g7
4452	or	%g6, %lo(KERNELBASE), %g6
4453	cmp	%g7, %g6
4454	bgeu	%xcc, 0f
4455	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4456	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4457	wrpr	%g7, %tpc
4458	add	%g7, 4, %g7
4459	wrpr	%g7, %tnpc
44600:
4461	retry
44625:
4463	/* g3=hlck_pa */
4464	KPMLOCK_EXIT(%g3, ASI_MEM)
4465	ba,pt	%icc, sfmmu_kpm_exception
4466	  nop
4467	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4468
4469	#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4470#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4471#endif
4472
4473	.seg	".data"
4474sfmmu_kpm_tsbmtl_panic:
4475	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
4476	.byte	0
4477sfmmu_kpm_stsbmtl_panic:
4478	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
4479	.byte	0
4480	.align	4
4481	.seg	".text"
4482
4483	/*
4484	 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4485	 * Called from C-level, sets/clears "go" indication for trap level
4486	 * handler.  khl_lock is a low level spin lock to protect the kp_tsbmtl
4487	 * field.  It is assumed that &kp->kp_refcntc is checked for zero or -1
4488	 * at C-level, and that khl_mutex is held when called from C-level.
4489	 */
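	/*
	 * C-level view, as a sketch (kpmlock_enter/kpmlock_exit paraphrase
	 * the KPMLOCK macros above; "disable/restore interrupts" stands
	 * for the pstate save/restore):
	 *
	 *	void
	 *	sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int on)
	 *	{
	 *		disable interrupts;
	 *		kpmlock_enter(khl_lock);
	 *		*kp_refcntc = on ? -1 : 0;	(-1 means "go")
	 *		kpmlock_exit(khl_lock);
	 *		restore interrupts;
	 *	}
	 */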
4490	ENTRY_NP(sfmmu_kpm_tsbmtl)
4491	rdpr	%pstate, %o3
4492	/*
4493	 * %o0 = &kp_refcntc
4494	 * %o1 = &khl_lock
4495	 * %o2 = 0/1 (off/on)
4496	 * %o3 = pstate save
4497	 */
4498#ifdef DEBUG
4499	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4500	bnz,pt %icc, 1f				/* disabled, panic	 */
4501	  nop
4502	save	%sp, -SA(MINFRAME), %sp
4503	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
4504	call	panic
4505	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4506	ret
4507	restore
45081:
4509#endif /* DEBUG */
4510	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4511
4512	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4513	mov	-1, %o5
4514	brz,a	%o2, 2f
4515	  mov	0, %o5
45162:
4517	sth	%o5, [%o0]
4518	KPMLOCK_EXIT(%o1, ASI_N)
4519
4520	retl
4521	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4522	SET_SIZE(sfmmu_kpm_tsbmtl)
4523
4524	/*
4525	 * kpm_smallpages: stores val to the byte at address "mapped" within
4526	 * low level lock brackets; the old value is returned.  Called from C-level.
4527	 */
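	/*
	 * C-level view, as a sketch (same conventions as the
	 * sfmmu_kpm_tsbmtl sketch above):
	 *
	 *	int
	 *	sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
	 *	{
	 *		int old;
	 *
	 *		disable interrupts;
	 *		kpmlock_enter(kshl_lock);
	 *		old = *mapped;
	 *		*mapped = val;
	 *		kpmlock_exit(kshl_lock);
	 *		restore interrupts;
	 *		return (old & KPM_MAPPED_MASK);
	 *	}
	 */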
4528	ENTRY_NP(sfmmu_kpm_stsbmtl)
4529	rdpr	%pstate, %o3
4530	/*
4531	 * %o0 = &mapped
4532	 * %o1 = &kshl_lock
4533	 * %o2 = val
4534	 * %o3 = pstate save
4535	 */
4536#ifdef DEBUG
4537	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4538	bnz,pt %icc, 1f				/* disabled, panic	 */
4539	  nop
4540	save	%sp, -SA(MINFRAME), %sp
4541	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
4542	call	panic
4543	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4544	ret
4545	restore
45461:
4547#endif /* DEBUG */
4548	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4549
4550	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4551	ldsb	[%o0], %o5
4552	stb	%o2, [%o0]
4553	KPMLOCK_EXIT(%o1, ASI_N)
4554
4555	and	%o5, KPM_MAPPED_MASK, %o0	/* return old val */
4556	retl
4557	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4558	SET_SIZE(sfmmu_kpm_stsbmtl)
4559
4560#ifdef sun4v
4561	/*
4562	 * User/kernel data miss w/ multiple TSBs
4563	 * The first probe covers 8K, 64K, and 512K page sizes,
4564	 * because 64K and 512K mappings are replicated off 8K
4565	 * pointer.  Second probe covers 4M page size only.
4566	 *
4567	 * MMU fault area contains miss address and context.
4568	 */
4569	ALTENTRY(sfmmu_slow_dmmu_miss)
4570	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
4571
4572slow_miss_common:
4573	/*
4574	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4575	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
4576	 */
4577	brnz,pt	%g3, 8f			! check for user context
4578	  nop
4579
4580	/*
4581	 * Kernel miss
4582	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
4583	 * branch to sfmmu_tsb_miss_tt to handle it.
4584	 */
4585	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4586sfmmu_dslow_patch_ktsb_base:
4587	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
4588sfmmu_dslow_patch_ktsb_szcode:
4589	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
4590
4591	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
4592	! %g1 = First TSB entry pointer, as TSB miss handler expects
4593
4594	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4595sfmmu_dslow_patch_ktsb4m_base:
4596	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
4597sfmmu_dslow_patch_ktsb4m_szcode:
4598	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
4599
4600	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
4601	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
4602	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4603	.empty
4604
46058:
4606	/*
4607	 * User miss
4608	 * Get first TSB pointer in %g1
4609	 * Get second TSB pointer (or NULL if no second TSB) in %g3
4610	 * Branch to sfmmu_tsb_miss_tt to handle it
4611	 */
4612	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
4613	/* %g1 = first TSB entry ptr now, %g2 preserved */
4614
4615	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
4616	brlz,pt %g3, sfmmu_tsb_miss_tt		/* done if no 2nd TSB */
4617	  nop
4618
4619	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
4620	/* %g3 = second TSB entry ptr now, %g2 preserved */
46219:
4622	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4623	.empty
4624	SET_SIZE(sfmmu_slow_dmmu_miss)
4625
4626
4627	/*
4628	 * User/kernel instruction miss w/ multiple TSBs
4629	 * The first probe covers 8K, 64K, and 512K page sizes,
4630	 * because 64K and 512K mappings are replicated off 8K
4631	 * pointer.  Second probe covers 4M page size only.
4632	 *
4633	 * MMU fault area contains miss address and context.
4634	 */
4635	ALTENTRY(sfmmu_slow_immu_miss)
4636	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4637	ba,a,pt	%xcc, slow_miss_common
4638	SET_SIZE(sfmmu_slow_immu_miss)
4639
4640#endif /* sun4v */
4641
4642/*
4643 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4644 */
4645	.seg	".data"
4646	.align	64
4647	.global tsbmiss_area
4648tsbmiss_area:
4649	.skip	(TSBMISS_SIZE * NCPU)
4650
4651	.align	64
4652	.global kpmtsbm_area
4653kpmtsbm_area:
4654	.skip	(KPMTSBM_SIZE * NCPU)
4655