/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#ifndef	lint

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */

#ifndef	lint

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBTAG_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

98/*
99 * The following code assumes the tsb is not split.
100 *
101 * With TSBs no longer shared between processes, it's no longer
102 * necessary to hash the context bits into the tsb index to get
103 * tsb coloring; the new implementation treats the TSB as a
104 * direct-mapped, virtually-addressed cache.
105 *
106 * In:
107 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
108 *    tsbbase = base address of TSB (clobbered)
109 *    tagacc = tag access register (clobbered)
110 *    szc = size code of TSB (ro)
111 *    tmp = scratch reg
112 * Out:
113 *    tsbbase = pointer to entry in TSB
114 */
115#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
116	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
117	srlx	tagacc, vpshift, tagacc 				;\
118	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
119	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
120	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
121	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
122	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
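
/*
 * For reference, a rough C equivalent of GET_TSBE_POINTER under the
 * assumptions above (direct-mapped, non-split TSB).  This is only an
 * illustrative sketch: "struct tsbe" is a descriptive stand-in for the
 * real TSBE layout (tag at offset TSBE_TAG == 0, tte at TSBE_TTE), and
 * get_tsbe_pointer() is not a kernel function.
 *
 *	struct tsbe {
 *		uint64_t tag;
 *		uint64_t tte;
 *	};
 *
 *	static struct tsbe *
 *	get_tsbe_pointer(uintptr_t tsbbase, uint64_t tagacc, int vpshift,
 *	    int szc)
 *	{
 *		uint64_t nentries = TSB_ENTRIES(0) << szc;
 *		uint64_t idx = (tagacc >> vpshift) & (nentries - 1);
 *
 *		return ((struct tsbe *)(tsbbase + (idx << TSB_ENTRY_SHIFT)));
 *	}
 */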

/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
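
/*
 * The TSB selection above, as a hedged C sketch: the small kpm TSB is
 * used only for MMU_PAGESHIFT requests, and the entry pointer is then
 * formed exactly as in GET_TSBE_POINTER.  get_tsbe_pointer() is the
 * hypothetical helper from the previous sketch.
 *
 *	static struct tsbe *
 *	get_kpm_tsbe_pointer(uint64_t vaddr, int vpshift)
 *	{
 *		uintptr_t base;
 *		int szc;
 *
 *		if (vpshift == MMU_PAGESHIFT) {
 *			base = kpmsm_tsbbase;
 *			szc = kpmsm_tsbsz;
 *		} else {
 *			base = kpm_tsbbase;
 *			szc = kpm_tsbsz;
 *		}
 *		return (get_tsbe_pointer(base, vaddr, vpshift, szc));
 *	}
 */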

/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]%asi, tmp1					;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]%asi, tmp1					;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#endif /* UTSB_PHYS */
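
/*
 * The locking protocol above in rough C, operating on the 32-bit high
 * word of the tag from the earlier sketch: spin while the word holds the
 * locked pattern, then compare-and-swap it in ourselves.  atomic_cas_32()
 * and membar_producer() stand in for casa and membar #StoreStore; this
 * sketch ignores the va/pa ASI distinction.
 *
 *	static void
 *	tsb_lock_entry(volatile uint32_t *tag_inthi)
 *	{
 *		uint32_t old;
 *
 *		do {
 *			while ((old = *tag_inthi) == TSBTAG_LOCKED)
 *				continue;
 *		} while (atomic_cas_32(tag_inthi, old, TSBTAG_LOCKED) != old);
 *		membar_producer();
 *	}
 */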

/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar #StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar #StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */
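
/*
 * The ordering in TSB_INSERT_UNLOCK_ENTRY is the point: the TTE data
 * must be globally visible before the tag store, since that single
 * store both publishes the entry and drops the lock.  A rough C sketch,
 * reusing the illustrative "struct tsbe" from above:
 *
 *	static void
 *	tsb_insert_unlock_entry(volatile struct tsbe *tsbep, uint64_t tte,
 *	    uint64_t tagtarget)
 *	{
 *		tsbep->tte = tte;
 *		membar_producer();
 *		tsbep->tag = tagtarget;
 *	}
 */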

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte; TSB invalidate will spin till the entry is unlocked.	;\
	 * Note, we always invalidate the hash table before we unload	;\
	 * the TSB.							;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte; TSB invalidate will spin till the entry is unlocked.	;\
	 * Note, we always invalidate the hash table before we unload	;\
	 * the TSB.							;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte; TSB invalidate will spin till the entry is unlocked.	;\
	 * Note, we always invalidate the hash table before we unload	;\
	 * the TSB.  Or in the 4M pfn offset to the TTE and set the	;\
	 * exec_perm bit to 0 and the exec_synth bit to 1.		;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1						;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte				;\
	or	tte, TTE_E_SYNTH_INT, tte				;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp1 = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn
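
/*
 * What GET_4M_PFN_OFF computes, as a hedged C sketch: the 4M-granular
 * offset of the faulting address within its 32M or 256M page.  A 32M
 * page contains 8 4M pieces (3 offset bits) and a 256M page contains
 * 64 (6 offset bits); a zero tte_size field selects the 32M case, as
 * in the assembly.
 *
 *	static uint64_t
 *	get_4m_pfn_off(uint64_t tte, uint64_t tagaccess)
 *	{
 *		uint64_t off4m = tagaccess >> MMU_PAGESHIFT4M;
 *		uint64_t szbits = (tte >> TTE_SZ_SHFT) & TTE_SZ_BITS;
 *
 *		off4m &= (szbits == 0) ? 0x7 : 0x3f;
 *		return (off4m << MMU_PAGESHIFT4M);
 *	}
 */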

/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp1 = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits. 					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile-time check
 *	 earlier to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */

#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */
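
/*
 * How the patched sequence above reassembles a 64-bit constant, shown
 * as C.  %hh, %hm, %lm and %lo select bits 63:42, 41:32, 31:10 and 9:0
 * respectively; these are the same four fields sfmmu_fixup_setx()
 * writes into the instructions later.  runtime_patch_setx() is just an
 * illustrative name.
 *
 *	static uint64_t
 *	runtime_patch_setx(uint64_t value)
 *	{
 *		uint64_t tmp, dest;
 *
 *		tmp = ((value >> 42) & 0x3fffff) << 10;
 *		dest = value & 0xfffffc00;
 *		tmp |= (value >> 32) & 0x3ff;
 *		dest |= value & 0x3ff;
 *		tmp <<= 32;
 *		return (tmp | dest);
 *	}
 */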


#endif /* lint */


#if defined (lint)

/*
 * sfmmu related subroutines
 */

/*
 * Use cas, if tte has changed underneath us then reread and try again.
 * In the case of a retry, it will update sttep with the new original.
 */
/* ARGSUSED */
int
sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/*
 * Use cas, if tte has changed underneath us then return 1, else return 0
 */
/* ARGSUSED */
int
sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/* ARGSUSED */
void
sfmmu_copytte(tte_t *sttep, tte_t *dttep)
{}

/*ARGSUSED*/
struct tsbe *
sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
{ return(0); }

/*ARGSUSED*/
uint64_t
sfmmu_make_tsbtag(caddr_t va)
{ return(0); }

#else	/* lint */

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"


	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
2:
	ldx	[%o1], %g2			/* modified */
	cmp	%g2, %g3			/* is modified = current? */
	be,a,pt	%xcc,1f				/* yes, don't write */
	stx	%g3, [%o0]			/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f			/* cas succeeded - return */
	  nop
	ldx	[%o2], %g3			/* new current */
	stx	%g3, [%o0]			/* save as new original */
	ba,pt	%xcc, 2b
	  mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)
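
	/*
	 * For reference, sfmmu_modifytte() in rough C.  It keeps trying to
	 * cas *stmodttep over the original *sttep in *dttep; on every
	 * failure it refreshes the caller's notion of the original.  This
	 * sketch assumes the tte_t union exposes its raw 64-bit image as
	 * ->ll, and uses atomic_cas_64() in place of casx; the trailing
	 * membar #StoreLoad is omitted.
	 *
	 *	static void
	 *	sfmmu_modifytte_c(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
	 *	{
	 *		uint64_t cur = dttep->ll;
	 *		uint64_t orig = sttep->ll;
	 *
	 *		for (;;) {
	 *			uint64_t mod = stmodttep->ll;
	 *
	 *			if (mod == cur) {
	 *				sttep->ll = cur;
	 *				return;
	 *			}
	 *			if (atomic_cas_64(&dttep->ll, orig, mod) == orig)
	 *				return;
	 *			cur = dttep->ll;
	 *			sttep->ll = cur;
	 *			orig = cur;
	 *		}
	 *	}
	 */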

	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2			/* modified */
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
	cmp	%g3, %g2			/* is modified = current? */
	be,a,pn %xcc,1f				/* yes, don't write */
	mov	0, %o1				/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1			/* cas failed. */
	move	%xcc, 1, %o1			/* cas succeeded. */
1:
	stx	%g2, [%o0]			/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

#endif /* lint */

/*
 * Other sfmmu primitives
 */


#if defined (lint)
void
sfmmu_patch_ktsb(void)
{
}

void
sfmmu_kpm_patch_tlbm(void)
{
}

void
sfmmu_kpm_patch_tsbm(void)
{
}

/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
{
}

/* ARGSUSED */
void
sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
{
}

/* ARGSUSED */
void
sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
{
}

/* ARGSUSED */
void
sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
{
}

#else /* lint */

#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)
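
	/*
	 * sfmmu_fix_ktlb_traptable() relies on the shift count living in
	 * the low-order bits of the sll/srl instruction word, so
	 * subtracting the TSB size code from the whole word decrements
	 * just the immediate (the caller guarantees the starting shift is
	 * large enough not to borrow into other fields).  A hedged C
	 * sketch of patching one instruction; flush_instr_mem() stands in
	 * for the flush instruction:
	 *
	 *	static void
	 *	fix_shift_instr(uint32_t *instrp, int tsb_szc)
	 *	{
	 *		*instrp -= tsb_szc;
	 *		flush_instr_mem((caddr_t)instrp, sizeof (*instrp));
	 *	}
	 */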

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 32:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as follows:
	 *
	 * sethi   %hh(RUNTIME_PATCH), tmp
	 * sethi   %lm(RUNTIME_PATCH), dest
	 * or      tmp, %hm(RUNTIME_PATCH), tmp
	 * or      dest, %lo(RUNTIME_PATCH), dest
	 * sllx    tmp, 32, tmp
	 * nop
	 * or      tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)
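
	/*
	 * The field carving done by sfmmu_fixup_setx() above, in rough C.
	 * Each target instruction was assembled against RUNTIME_PATCH
	 * (aka 0), so the immediates can simply be or-ed in; instrp points
	 * at the 7-instruction sequence and flush_instr_mem() again stands
	 * in for flush.
	 *
	 *	static void
	 *	fixup_setx(uint32_t *instrp, uint64_t val)
	 *	{
	 *		instrp[0] |= (uint32_t)((val >> 42) & 0x3fffff);
	 *		instrp[1] |= (uint32_t)((val >> 10) & 0x3fffff);
	 *		instrp[2] |= (uint32_t)((val >> 32) & 0x3ff);
	 *		instrp[3] |= (uint32_t)(val & 0x3ff);
	 *		flush_instr_mem((caddr_t)instrp, 4 * sizeof (uint32_t));
	 *	}
	 */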

	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to setup, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	  srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	  or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	  add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)

	/*
	 * Patch immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
	movrz	%o0, ASI_NQUAD_LD, %o3
	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	  or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	  or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	  or	%o0, %lo(iktsb), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)

	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	  nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif

	ldx 	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx 	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)

	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)

	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	flush	%o0
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)

	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	ldsw	[%o1], %o3
	subcc	%o4, %o3, %o4
	bz,pt	%icc, 1f
	  /* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	lduw	[%o1], %o4
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)


	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt 	%icc, 1f			/* disabled, panic	 */
	  nop

	sethi	%hi(panicstr), %g1
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pt	%icc, 1f
	  nop

	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(sfmmu_panic1), %o0
	call	panic
	 or	%o0, %lo(sfmmu_panic1), %o0
1:
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, 1)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_load_tsbe)

	/*
	 * Flush TSB of a given entry if the tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_unload_tsbe)

	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		! if interrupts already
	bnz,pt	%icc, 1f			! disabled, panic
	  nop

	sethi	%hi(panicstr), %g1
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pt	%icc, 1f
	  nop

	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(sfmmu_panic1), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic1), %o0
1:
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, 1)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	  membar #StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_load_tsb)

	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg. Locking is required since
	 * kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	  membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_unload_tsb)

#endif /* lint */


#if defined (lint)

/*ARGSUSED*/
pfn_t
sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
{ return(0); }

#else /* lint */

	ENTRY_NP(sfmmu_ttetopfn)
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0
	SET_SIZE(sfmmu_ttetopfn)

#endif /* !lint */


#if defined (lint)
/*
 * The sfmmu_hblk_hash_add is the assembly primitive for adding hmeblks to
 * the hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa)
{
}

/*
 * The sfmmu_hblk_hash_rm is the assembly primitive to remove hmeblks from the
 * hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa, struct hme_blk *prev_hblkp)
{
}
#else /* lint */

/*
 * Functions to grab/release the hme bucket list lock.  We only use a byte
 * instead of the whole int because eventually we might want to
 * put some counters on the other bytes (of course, these routines would
 * have to change).  The code that grabs this lock should execute
 * with interrupts disabled and hold the lock for the least amount of time
 * possible.
 */

/*
 * Even though hmeh_listlock is updated using pa there's no need to flush
 * dcache since hmeh_listlock will be restored to the original value (0)
 * before interrupts are reenabled.
 */

/*
 * For sparcv9 hme hash buckets may not be in the nucleus.  hme hash update
 * routines still use virtual addresses to update the bucket fields. But they
 * must not cause a TLB miss after grabbing the low level bucket lock. To
 * achieve this we must make sure the bucket structure is completely within an
 * 8K page.
 */

#if (HMEBUCK_SIZE & (HMEBUCK_SIZE - 1))
#error - the size of hmehash_bucket structure is not a power of 2
#endif

#define HMELOCK_ENTER(hmebp, tmp1, tmp2, label1, asi)           \
	mov     0xff, tmp2                                      ;\
	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
label1:                                                         ;\
	casa    [tmp1]asi, %g0, tmp2                            ;\
	brnz,pn tmp2, label1                                    ;\
	mov     0xff, tmp2                                      ;\
	membar  #LoadLoad

#define HMELOCK_EXIT(hmebp, tmp1, asi)                          \
	membar  #LoadStore|#StoreStore                          ;\
	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
	sta     %g0, [tmp1]asi
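
/*
 * The bucket lock in rough C.  The assembly cas-es the whole word at
 * hmebp + HMEBUCK_LOCK against 0 with 0xff, even though only a byte is
 * meaningful (see the comment above); membar_consumer() and
 * membar_exit() match the #LoadLoad and #LoadStore|#StoreStore
 * barriers.  Treat this as an illustrative sketch, not the hat layer's
 * API.
 *
 *	static void
 *	hmelock_enter(volatile uint32_t *lockp)
 *	{
 *		while (atomic_cas_32(lockp, 0, 0xff) != 0)
 *			continue;
 *		membar_consumer();
 *	}
 *
 *	static void
 *	hmelock_exit(volatile uint32_t *lockp)
 *	{
 *		membar_exit();
 *		*lockp = 0;
 *	}
 */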

	.seg	".data"
hblk_add_panic1:
	.ascii	"sfmmu_hblk_hash_add: interrupts disabled"
	.byte	0
hblk_add_panic2:
	.ascii	"sfmmu_hblk_hash_add: va hmeblkp is NULL but pa is not"
	.byte	0
	.align	4
	.seg	".text"

	ENTRY_NP(sfmmu_hblk_hash_add)
	/*
	 * %o0 = hmebp
	 * %o1 = hmeblkp
	 * %o2 = hblkpa
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt %icc, 3f				/* disabled, panic	 */
	  nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic1), %o0
	call	panic
	 or	%o0, %lo(hblk_add_panic1), %o0
	ret
	restore

3:
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
	mov	%o2, %g1

	/*
	 * g1 = hblkpa
	 */
	ldn	[%o0 + HMEBUCK_HBLK], %o4	/* next hmeblk */
	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = next hblkpa */
#ifdef	DEBUG
	cmp	%o4, %g0
	bne,pt %xcc, 1f
	 nop
	brz,pt %g2, 1f
	 nop
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic2), %o0
	call	panic
	  or	%o0, %lo(hblk_add_panic2), %o0
	ret
	restore
1:
#endif /* DEBUG */
	/*
	 * We update the hmeblk's entries before grabbing the lock because
	 * the stores could take a tlb miss and require the hash lock.  The
	 * buckets are part of the nucleus so we are cool with those stores.
	 *
	 * If buckets are not part of the nucleus our game is to
	 * not touch any other page via va until we drop the lock.
	 * This guarantees we won't get a tlb miss before the lock release
	 * since interrupts are disabled.
	 */
	stn	%o4, [%o1 + HMEBLK_NEXT]	/* update hmeblk's next */
	stx	%g2, [%o1 + HMEBLK_NEXTPA]	/* update hmeblk's next pa */
	HMELOCK_ENTER(%o0, %o2, %o3, hashadd1, ASI_N)
	stn	%o1, [%o0 + HMEBUCK_HBLK]	/* update bucket hblk next */
	stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* add hmeblk to list */
	HMELOCK_EXIT(%o0, %g2, ASI_N)
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_hblk_hash_add)

	ENTRY_NP(sfmmu_hblk_hash_rm)
	/*
	 * This function removes an hmeblk from the hash chain.
	 * It is written to guarantee we don't take a tlb miss
	 * by using physical addresses to update the list.
	 *
	 * %o0 = hmebp
	 * %o1 = hmeblkp
	 * %o2 = hmeblkp previous pa
	 * %o3 = hmeblkp previous
	 */

	mov	%o3, %o4			/* o4 = hmeblkp previous */

	rdpr	%pstate, %o5
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt 	%icc, 3f			/* disabled, panic	 */
	  nop

	sethi	%hi(panicstr), %g1
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pt	%icc, 3f
	  nop

	sethi	%hi(sfmmu_panic1), %o0
	call	panic
	 or	%o0, %lo(sfmmu_panic1), %o0
3:
#endif /* DEBUG */
	/*
	 * disable interrupts, clear Address Mask to access 64 bit physaddr
	 */
	andn    %o5, PSTATE_IE, %g1
	wrpr    %g1, 0, %pstate

#ifndef sun4v
	sethi   %hi(dcache_line_mask), %g4
	ld      [%g4 + %lo(dcache_line_mask)], %g4
#endif /* sun4v */

	/*
	 * If buckets are not part of the nucleus our game is to
	 * not touch any other page via va until we drop the lock.
	 * This guarantees we won't get a tlb miss before the lock release
	 * since interrupts are disabled.
	 */
	HMELOCK_ENTER(%o0, %g1, %g3, hashrm1, ASI_N)
	ldn	[%o0 + HMEBUCK_HBLK], %g2	/* first hmeblk in list */
	cmp	%g2, %o1
	bne,pt	%ncc,1f
	 mov	ASI_MEM, %asi
	/*
	 * hmeblk is first on list
	 */
	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = hmeblk pa */
	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
	stn	%o3, [%o0 + HMEBUCK_HBLK]	/* write va */
	ba,pt	%xcc, 2f
	  stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* write pa */
1:
	/* hmeblk is not first on list */

	mov	%o2, %g3
#ifndef sun4v
	GET_CPU_IMPL(%g2)
	cmp 	%g2, CHEETAH_IMPL
	bge,a,pt %icc, hblk_hash_rm_1
	  and	%o4, %g4, %g2
	cmp	%g2, SPITFIRE_IMPL
	blt	%icc, hblk_hash_rm_2		/* no flushing needed for OPL */
	  and	%o4, %g4, %g2
	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev pa from dcache */
	add	%o4, HMEBLK_NEXT, %o4
	and	%o4, %g4, %g2
	ba	hblk_hash_rm_2
	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev va from dcache */
hblk_hash_rm_1:

	stxa	%g0, [%g3]ASI_DC_INVAL		/* flush prev pa from dcache */
	membar	#Sync
	add     %g3, HMEBLK_NEXT, %g2
	stxa	%g0, [%g2]ASI_DC_INVAL		/* flush prev va from dcache */
hblk_hash_rm_2:
	membar	#Sync
#endif /* sun4v */
	ldxa	[%g3 + HMEBLK_NEXTPA] %asi, %g2	/* g2 = hmeblk pa */
	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
	stna	%o3, [%g3 + HMEBLK_NEXT] %asi	/* write va */
	stxa	%g1, [%g3 + HMEBLK_NEXTPA] %asi	/* write pa */
2:
	HMELOCK_EXIT(%o0, %g2, ASI_N)
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_hblk_hash_rm)

#endif /* lint */

/*
 * These macros are used to update global sfmmu hme hash statistics
 * in perf critical paths.  They are only enabled in debug kernels or
 * if SFMMU_STAT_GATHER is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_LINKS, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]


#else /* DEBUG || SFMMU_STAT_GATHER */

#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#endif  /* DEBUG || SFMMU_STAT_GATHER */

/*
 * This macro is used to update global sfmmu kstats in non
 * perf critical areas so it is enabled all the time.
 */
#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
	sethi	%hi(sfmmu_global_stat), tmp1				;\
	add	tmp1, statname, tmp1					;\
	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
	inc	tmp2							;\
	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]

/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time
 */
#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
	ld	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	st	tmp1, [tsbarea + stat]

/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time
 */
#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
	lduh	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	stuh	tmp1, [tsbarea + stat]

#if defined(KPM_TLBMISS_STATS_GATHER)
	/*
	 * Count kpm dtlb misses separately to allow a different
	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
	 */
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
	brgez	tagacc, label	/* KPM VA? */				;\
	nop								;\
	CPU_INDEX(tmp1, tsbma)						;\
	sethi	%hi(kpmtsbm_area), tsbma				;\
	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
	/* VA range check */						;\
	ldx	[tsbma + KPMTSBM_VBASE], val				;\
	cmp	tagacc, val						;\
	blu,pn	%xcc, label						;\
	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
	cmp	tagacc, tmp1						;\
	bgeu,pn	%xcc, label						;\
	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
	inc	val							;\
	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
label:
#else
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
#endif	/* KPM_TLBMISS_STATS_GATHER */

#if defined (lint)
/*
 * The following routines are jumped to from the mmu trap handlers to do
 * the setting up to call systrap.  They are separate routines instead of
 * being part of the handlers because the handlers would exceed 32
 * instructions, and since this is part of the slow path the jump
 * cost is irrelevant.
 */
void
sfmmu_pagefault(void)
{
}

void
sfmmu_mmu_trap(void)
{
}

void
sfmmu_window_trap(void)
{
}

void
sfmmu_kpm_exception(void)
{
}

#else /* lint */

#ifdef	PTL1_PANIC_DEBUG
	.seg	".data"
	.global	test_ptl1_panic
test_ptl1_panic:
	.word	0
	.align	8

	.seg	".text"
	.align	4
#endif	/* PTL1_PANIC_DEBUG */


	ENTRY_NP(sfmmu_pagefault)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */

#ifdef  PTL1_PANIC_DEBUG
	/* check if we want to test the tl1 panic */
	sethi	%hi(test_ptl1_panic), %g4
	ld	[%g4 + %lo(test_ptl1_panic)], %g1
	st	%g0, [%g4 + %lo(test_ptl1_panic)]
	cmp	%g1, %g0
	bne,a,pn %icc, ptl1_panic
	  or	%g0, PTL1_BAD_DEBUG, %g1
#endif	/* PTL1_PANIC_DEBUG */
1:
	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
	/*
	 * g2 = tag access reg
	 * g3.l = type
	 * g3.h = 0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1
2:
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	SET_SIZE(sfmmu_pagefault)

	ENTRY_NP(sfmmu_mmu_trap)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1:
	/*
	 * g2 = tag access reg
	 * g3 = type
	 */
	sethi	%hi(sfmmu_tsbmiss_exception), %g1
	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_mmu_trap)

	ENTRY_NP(sfmmu_suspend_tl)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3
	movne	%icc, T_DATA_PROT, %g3
1:
	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
	ba,pt	%xcc, sys_trap
	  mov	PIL_15, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_suspend_tl)

	/*
	 * No %g registers in use at this point.
	 */
	ENTRY_NP(sfmmu_window_trap)
	rdpr	%tpc, %g1
#ifdef sun4v
#ifdef DEBUG
	/* We assume previous %gl was 1 */
	rdpr	%tstate, %g4
	srlx	%g4, TSTATE_GL_SHIFT, %g4
	and	%g4, TSTATE_GL_MASK, %g4
	cmp	%g4, 1
	bne,a,pn %icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
#endif /* DEBUG */
	/* user miss at tl>1. better be the window handler or user_rtt */
	/* in user_rtt? */
	set	rtt_fill_start, %g4
	cmp	%g1, %g4
	blu,pn %xcc, 6f
	 .empty
	set	rtt_fill_end, %g4
	cmp	%g1, %g4
	bgeu,pn %xcc, 6f
	 nop
	set	fault_rtt_fn1, %g1
	wrpr	%g0, %g1, %tnpc
	ba,a	7f
6:
	! must save this trap level before descending trap stack
	! no need to save %tnpc, either overwritten or discarded
	! already got it: rdpr	%tpc, %g1
	rdpr	%tstate, %g6
	rdpr	%tt, %g7
	! trap level saved, go get underlying trap type
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
	! restore saved trap level
	wrpr	%g1, %tpc
	wrpr	%g6, %tstate
	wrpr	%g7, %tt
#else /* sun4v */
	/* user miss at tl>1. better be the window handler */
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
#endif /* sun4v */
	and	%g2, WTRAP_TTMASK, %g4
	cmp	%g4, WTRAP_TYPE
	bne,pn	%xcc, 1f
	 nop
	/* tpc should be in the trap table */
	set	trap_table, %g4
	cmp	%g1, %g4
	blt,pn %xcc, 1f
	 .empty
	set	etrap_table, %g4
	cmp	%g1, %g4
	bge,pn %xcc, 1f
	 .empty
	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
	add	%g1, WTRAP_FAULTOFF, %g1
	wrpr	%g0, %g1, %tnpc
7:
	/*
	 * Some wbuf handlers will call systrap to resolve the fault; we
	 * pass the trap type so they can figure out the correct parameters.
	 * g5 = trap type, g6 = tag access reg
	 */

	/*
	 * only use g5, g6, g7 registers after we have switched to alternate
	 * globals.
	 */
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
	rdpr	%tt, %g7
	cmp	%g7, FAST_IMMU_MISS_TT
	be,a,pn	%icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
	cmp	%g7, T_INSTR_MMU_MISS
	be,a,pn	%icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
	mov	T_DATA_PROT, %g5
	cmp	%g7, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g5
	cmp	%g7, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g5
	! XXXQ AGS re-check out this one
	done
1:
	CPU_ADDR(%g1, %g4)
	ld	[%g1 + CPU_TL1_HDLR], %g4
	brnz,a,pt %g4, sfmmu_mmu_trap
	  st	%g0, [%g1 + CPU_TL1_HDLR]
	ba,pt	%icc, ptl1_panic
	  mov	PTL1_BAD_TRAP, %g1
	SET_SIZE(sfmmu_window_trap)

	ENTRY_NP(sfmmu_kpm_exception)
	/*
	 * We have accessed an unmapped segkpm address or a legal segkpm
	 * address which is involved in a VAC alias conflict prevention.
	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
	 * set. If it is, we will instead note that a fault has occurred
	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
	 * a "retry"). This will step over the faulting instruction.
	 * Note that this means that a legal segkpm address involved in
	 * a VAC alias conflict prevention (a rare case to begin with)
	 * cannot be used in DTrace.
	 */
	CPU_INDEX(%g1, %g2)
	set	cpu_core, %g2
	sllx	%g1, CPU_CORE_SHIFT, %g1
	add	%g1, %g2, %g1
	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
	bz	0f
	or	%g2, CPU_DTRACE_BADADDR, %g2
	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
	done
0:
	TSTAT_CHECK_TL1(1f, %g1, %g2)
1:
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	/*
	 * g2=tagacc g3.l=type g3.h=0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4
	SET_SIZE(sfmmu_kpm_exception)

#endif /* lint */

#if defined (lint)

void
sfmmu_tsb_miss(void)
{
}

void
sfmmu_kpm_dtsb_miss(void)
{
}

void
sfmmu_kpm_dtsb_miss_small(void)
{
}

#else /* lint */


#if (CTX_SIZE != (1 << CTX_SZ_SHIFT))
#error - size of context struct does not match CTX_SZ_SHIFT
#endif

#if (IMAP_SEG != 0)
#error - ism_map->ism_seg offset is not zero
#endif
1857
1858/*
1859 * Checks to see if the vaddr passed in via tagacc is in an ISM segment
1860 * for this process.  If it is, copies the ism mapping's ism_seg into
1861 * param "ismseg" and branches to label "ismhit".  If this is not an
1862 * ISM process or not an ISM tlb miss, it falls through.
1866 *
1867 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
1868 * so that any other threads of this process will not try and walk the ism
1869 * so that any other threads of this process will not try to walk the ism
1870 *
1871 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
1872 *       will make sure of that. This means we can terminate our search on
1873 *       the first zero mapping we find.
1874 *
1875 * Parameters:
1876 * tagacc	= tag access register (vaddr + ctx) (in)
1877 * tsbmiss	= address of tsb miss area (in)
1878 * ismseg	= contents of ism_seg for this ism map (out)
1879 * ismhat	= physical address of imap_ismhat for this ism map (out)
1880 * tmp1		= scratch reg (CLOBBERED)
1881 * tmp2		= scratch reg (CLOBBERED)
1882 * tmp3		= scratch reg (CLOBBERED)
1883 * label:     temporary labels
1884 * ismhit:    label to jump to on an ism dtlb miss
1885 * exitlabel: label to jump to if the hat is busy due to hat_unshare.
1886 */
1887#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
1888	label, ismhit)							\
1889	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
1890	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
1891	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
1892label/**/1:								;\
1893	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
1894	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
1895label/**/2:								;\
1896	brz,pt  ismseg, label/**/3		/* no mapping */	;\
1897	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
1898	lduha	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
1899	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
1900	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
1901	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
1902	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
1903	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
1904	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
1905	cmp	tmp2, tmp1		 	/* check va <= offset*/	;\
1906	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
1907	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
1908									;\
1909	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
1910	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
1911	cmp	ismhat, tmp1						;\
1912	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
1913	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
1914									;\
1915	add	tmp3, IBLK_NEXTPA, tmp1					;\
1916	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
1917	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
1918	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
1919label/**/3:
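
/*
 * For reference, the walk above corresponds roughly to this C sketch
 * (abbreviated field names; the authoritative layout is ism_blk_t and
 * ism_map_t in hat_sfmmu.h):
 *
 *	for (blk = ismblkpa; blk != -1; blk = blk->iblk_nextpa) {
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			if (blk->iblk_maps[i].ism_seg == 0)
 *				return;		-- no holes, so stop
 *			size = blk->iblk_maps[i].ism_seg & sz_mask;
 *			if ((va >> vb_shift) - ism_vbase < size)
 *				goto ismhit;
 *		}
 *	}
 */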
1920
1921/*
1922 * Returns the hme hash bucket (hmebp) given the vaddr, and the hatid
1923 * It also returns the virtual pg for vaddr (ie. vaddr << hmeshift)
1924 * Parameters:
1925 * vaddr = reg containing virtual address
1926 * hatid = reg containing sfmmu pointer
1927 * hmeshift = constant/register to shift vaddr to obtain vapg
1928 * hmebp = register where bucket pointer will be stored
1929 * vapg = register where virtual page will be stored
1930 * tmp1, tmp2 = tmp registers
1931 */
1932
1933
1934#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
1935	vapg, label, tmp1, tmp2)					\
1936	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
1937	brnz,a,pt tmp1, label/**/1					;\
1938	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
1939	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
1940	ba,pt	%xcc, label/**/2					;\
1941	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
1942label/**/1:								;\
1943	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
1944label/**/2:								;\
1945	srlx	tagacc, hmeshift, vapg					;\
1946	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
1947	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
1948	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
1949	add	hmebp, tmp1, hmebp
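
/*
 * This is the assembly analogue of the C-level hme hash computation
 * (a sketch; compare the HME_HASH_FUNCTION macro in hat_sfmmu.h):
 *
 *	hmebp = hashstart +
 *	    (((uintptr_t)hatid ^ (vaddr >> hmeshift)) & hashsz) * HMEBUCK_SIZE;
 *
 * where hashstart/hashsz come from the kernel hash when the ctx bits
 * in tagacc are zero, and from the user hash otherwise.
 */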
1950
1951/*
1952 * hashtag includes bspage + hashno (64 bits).
1953 */
1954
1955#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
1956	sllx	vapg, hmeshift, vapg					;\
1957	or	vapg, hashno, hblktag
1958
1959/*
1960 * Function to traverse hmeblk hash link list and find corresponding match.
1961 * The search is done using physical pointers. It returns the physical address
1962 * and virtual address pointers to the hmeblk that matches with the tag
1963 * provided.
1964 * Parameters:
1965 * hmebp	= register that points to hme hash bucket, also used as
1966 *		  tmp reg (clobbered)
1967 * hmeblktag	= register with hmeblk tag match
1968 * hatid	= register with hatid
1969 * hmeblkpa	= register where physical ptr will be stored
1970 * hmeblkva	= register where virtual ptr will be stored
1971 * tmp1		= tmp reg
1972 * label: temporary label
1973 */
1974
1975#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, hmeblkva,	\
1976	tsbarea, tmp1, label)					 	\
1977	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
1978	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
1979	add     hmebp, HMEBUCK_HBLK, hmeblkva				;\
1980	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
1981	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
1982label/**/1:								;\
1983	brz,pn	hmeblkva, label/**/2					;\
1984	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
1985	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
1986	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
1987	add	hmebp, CLONGSIZE, hmebp					;\
1988	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
1989	xor	tmp1, hmeblktag, tmp1					;\
1990	xor	hmebp, hatid, hmebp					;\
1991	or	hmebp, tmp1, hmebp					;\
1992	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
1993	  add	hmeblkpa, HMEBLK_NEXT, hmebp				;\
1994	ldna	[hmebp]ASI_MEM, hmeblkva	/* hmeblk ptr va */	;\
1995	add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
1996	ba,pt	%xcc, label/**/1					;\
1997	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
1998label/**/2:
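
/*
 * The xor/xor/or sequence above is a branch-free compare of both
 * longwords of the hmeblk tag; in C terms (sketch):
 *
 *	if (((hblk->htag_tag ^ hmeblktag) | (hblk->htag_id ^ hatid)) == 0)
 *		goto hit;
 *
 * i.e. both the bspage/hashno word and the hatid word must match.
 */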
1999
2000
2001#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2002#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2003#endif
2004
2005/*
2006 * HMEBLK_TO_HMENT is a macro that given an hmeblk and a vaddr returns
2007 * the offset for the corresponding hment.
2008 * Parameters:
2009 * vaddr = register with virtual address
2010 * hmeblkpa = physical pointer to hme_blk
2011 * hmentoff = register where hment offset will be stored
2012 * tmp1 = scratch register
2013 * label1 = temporary label
2014 */
2015#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, tmp1, label1)	\
2016	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2017	lda	[hmentoff]ASI_MEM, tmp1 				;\
2018	andcc	tmp1, HBLK_SZMASK, %g0	 /* tmp1 = get_hblk_sz(%g5) */	;\
2019	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2020	  or	%g0, HMEBLK_HME1, hmentoff				;\
2021	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2022	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2023	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2024	add	tmp1, HMEBLK_HME1, hmentoff				;\
2025label1:
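
/*
 * In C terms the offset computed above is roughly (sketch):
 *
 *	if (get_hblk_sz(hblk) != TTE8K)
 *		hmentoff = HMEBLK_HME1;		-- single large-page hment
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    ((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) * SFHME_SIZE;
 */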
2026
2027/*
2028 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2029 *
2030 * tagacc	= tag access register (vaddr + ctx) (in)
2031 * hatid	= sfmmu pointer for TSB miss (in)
2032 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2033 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2034 * hmeblkva	= VA of hment if found, otherwise clobbered (out)
2035 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2036 * hmentoff	= temporarily stores hment offset (clobbered)
2037 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2038 *		  for this page size.
2039 * hashno	= constant/register hash number
2040 * label	= temporary label for branching within macro.
2041 * foundlabel	= label to jump to when tte is found.
2042 * suspendlabel= label to jump to when tte is suspended.
2043 * exitlabel	= label to jump to when tte is not found.  The hmebp lock
2044 *		  has already been dropped by this time.
2045 *
2046 * The caller should set up the tsbmiss->scratch[2] field correctly before
2047 * calling this function (aka TSBMISS_SCRATCH + TSBMISS_HATID).
2048 */
2049#define GET_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea, hmentoff, \
2050		hmeshift, hashno, label, foundlabel, suspendlabel, exitlabel) \
2051									;\
2052	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2053	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2054	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2055		hmeblkpa, label/**/5, hmentoff, hmeblkva)		;\
2056									;\
2057	/*								;\
2058	 * tagacc = tagacc						;\
2059	 * hatid = hatid						;\
2060	 * tsbarea = tsbarea						;\
2061	 * tte   = hmebp (hme bucket pointer)				;\
2062	 * hmeblkpa  = vapg  (virtual page)				;\
2063	 * hmentoff, hmeblkva = scratch					;\
2064	 */								;\
2065	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmentoff)	;\
2066									;\
2067	/*								;\
2068	 * tagacc = tagacc						;\
2069	 * hatid = hatid						;\
2070	 * tte   = hmebp						;\
2071	 * hmeblkpa  = CLOBBERED					;\
2072	 * hmentoff  = htag_bspage & hashno				;\
2073	 * hmeblkva  = scratch						;\
2074	 */								;\
2075	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2076	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
2077	HMEHASH_SEARCH(tte, hmentoff, hatid, hmeblkpa, hmeblkva, 	\
2078		tsbarea, tagacc, label/**/1)				;\
2079	/*								;\
2080	 * tagacc = CLOBBERED						;\
2081	 * tte = CLOBBERED						;\
2082	 * hmeblkpa = hmeblkpa						;\
2083	 * hmeblkva = hmeblkva						;\
2084	 */								;\
2085	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2086	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2087	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2088	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2089	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2090	  nop								;\
2091label/**/4:								;\
2092	/*								;\
2093	 * We have found the hmeblk containing the hment.		;\
2094	 * Now we calculate the corresponding tte.			;\
2095	 *								;\
2096	 * tagacc = tagacc						;\
2097	 * hatid = clobbered						;\
2098	 * tte   = hmebp						;\
2099	 * hmeblkpa  = hmeblkpa						;\
2100	 * hmentoff  = hblktag						;\
2101	 * hmeblkva  = hmeblkva 					;\
2102	 */								;\
2103	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hmentoff, hatid, label/**/2)	;\
2104									;\
2105	add	hmentoff, SFHME_TTE, hmentoff				;\
2106	add	hmeblkpa, hmentoff, hmeblkpa				;\
2107	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2108	add	hmeblkva, hmentoff, hmeblkva				;\
2109	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2110	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmentoff ;\
2111	HMELOCK_EXIT(hmentoff, hmentoff, ASI_MEM)	/* drop lock */	;\
2112	set	TTE_SUSPEND, hmentoff					;\
2113	TTE_SUSPEND_INT_SHIFT(hmentoff)					;\
2114	btst	tte, hmentoff						;\
2115	bz,pt	%xcc, foundlabel					;\
2116	  nop								;\
2117									;\
2118	/*								;\
2119	 * Mapping is suspended, so goto suspend label.			;\
2120	 */								;\
2121	ba,pt	%xcc, suspendlabel					;\
2122	  nop
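
/*
 * In outline, GET_TTE does roughly the following (a C-flavored sketch;
 * the lock and search steps are the HMELOCK_* and HMEHASH_* macros above):
 *
 *	hmebp = HMEHASH_FUNC_ASM(tagacc, hatid, hmeshift);
 *	tag = MAKE_HASHTAG(vapg, hatid, hmeshift, hashno);
 *	HMELOCK_ENTER(hmebp);
 *	hblk = HMEHASH_SEARCH(hmebp, tag, hatid);
 *	if (hblk == NULL) {
 *		HMELOCK_EXIT(hmebp);
 *		goto exitlabel;
 *	}
 *	tte = hblk->hblk_hme[HMEBLK_TO_HMENT(tagacc, hblk)].hme_tte;
 *	HMELOCK_EXIT(hmebp);
 *	goto (tte & TTE_SUSPEND) ? suspendlabel : foundlabel;
 */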
2123
2124	/*
2125	 * KERNEL PROTECTION HANDLER
2126	 *
2127	 * g1 = tsb8k pointer register (clobbered)
2128	 * g2 = tag access register (ro)
2129	 * g3 - g7 = scratch registers
2130	 *
2131	 * Note: This function is patched at runtime for performance reasons.
2132	 * 	 Any changes here require corresponding changes to sfmmu_patch_ktsb.
2133	 */
2134	ENTRY_NP(sfmmu_kprot_trap)
2135	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2136sfmmu_kprot_patch_ktsb_base:
2137	RUNTIME_PATCH_SETX(%g1, %g6)
2138	/* %g1 = contents of ktsb_base or ktsb_pbase */
2139sfmmu_kprot_patch_ktsb_szcode:
2140	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2141
2142	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2143	! %g1 = First TSB entry pointer, as TSB miss handler expects
2144
2145	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2146sfmmu_kprot_patch_ktsb4m_base:
2147	RUNTIME_PATCH_SETX(%g3, %g6)
2148	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2149sfmmu_kprot_patch_ktsb4m_szcode:
2150	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2151
2152	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2153	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2154
2155	CPU_TSBMISS_AREA(%g6, %g7)
2156	HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2157	ba,pt	%xcc, sfmmu_tsb_miss_tt
2158	  nop
2159
2160	/*
2161	 * USER PROTECTION HANDLER
2162	 *
2163	 * g1 = tsb8k pointer register (ro)
2164	 * g2 = tag access register (ro)
2165	 * g3 = faulting context (clobbered, currently not used)
2166	 * g4 - g7 = scratch registers
2167	 */
2168	ALTENTRY(sfmmu_uprot_trap)
2169#ifdef sun4v
2170	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2171	/* %g1 = first TSB entry ptr now, %g2 preserved */
2172
2173	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2174	brlz,pt %g3, 9f			/* check for 2nd TSB */
2175	  mov	%g0, %g3		/* clear second tsbe ptr */
2176
2177	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2178	/* %g3 = second TSB entry ptr now, %g2 preserved */
2179
2180#else /* sun4v */
2181#ifdef UTSB_PHYS
2182	/* g1 = first TSB entry ptr */
2183	GET_2ND_TSBREG(%g3)
2184	brlz,a,pt %g3, 9f		/* check for 2nd TSB */
2185	  mov	%g0, %g3		/* clear second tsbe ptr */
2186
2187	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2188	/* %g3 = second TSB entry ptr now, %g2 preserved */
2189#else /* UTSB_PHYS */
2190	brgez,pt %g1, 9f		/* check for 2nd TSB */
2191	  mov	%g0, %g3		/* clear second tsbe ptr */
2192
2193	mov	%g2, %g7
2194	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2195	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2196	mov	%g1, %g7
2197	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2198#endif /* UTSB_PHYS */
2199#endif /* sun4v */
22009:
2201	CPU_TSBMISS_AREA(%g6, %g7)
2202	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2203	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2204	  nop
2205
2206	/*
2207	 * Kernel 8K page iTLB miss.  We also get here if we took a
2208	 * fast instruction access mmu miss trap while running in
2209	 * invalid context.
2210	 *
2211	 * %g1 = 8K TSB pointer register (not used, clobbered)
2212	 * %g2 = tag access register (used)
2213	 * %g3 = faulting context id (used)
2214	 * %g7 = 4M virtual page number for tag matching  (used)
2215	 */
2216	.align	64
2217	ALTENTRY(sfmmu_kitlb_miss)
2218	brnz,pn %g3, tsb_tl0_noctxt
2219	  nop
2220
2221	/* kernel miss */
2222	/* get kernel tsb pointer */
2223	/* we patch the next set of instructions at run time */
2224	/* NOTE: any changes here require corresponding sfmmu_patch_ktsb changes */
2225iktsbbase:
2226	RUNTIME_PATCH_SETX(%g4, %g5)
2227	/* %g4 = contents of ktsb_base or ktsb_pbase */
2228
2229iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2230	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2231	or	%g4, %g1, %g1			! form tsb ptr
2232	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2233	cmp	%g4, %g7
2234	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2235	  andcc %g5, TTE_EXECPRM_INT, %g0	! check exec bit
2236	bz,pn	%icc, exec_fault
2237	  nop
2238	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2239	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2240	retry
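
	/*
	 * A note on the sllx/srlx pair above: with the boot-time-patched
	 * size code folded in, it computes, in effect (sketch)
	 *
	 *	idx = ((va >> MMU_PAGESHIFT) & (nentries - 1)) << TSB_ENTRY_SHIFT
	 *
	 * which is then or'ed with the size-aligned TSB base address.
	 */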
2241
2242	/*
2243	 * Kernel dTLB miss.  We also get here if we took a fast data
2244	 * access mmu miss trap while running in invalid context.
2245	 *
2246	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2247	 *	We select the TSB miss handler to branch to depending on
2248	 *	the virtual address of the access.  In the future it may
2249	 *	be desirable to separate kpm TTEs into their own TSB,
2250	 *	in which case all that needs to be done is to set
2251	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and, early in
2252	 *	the miss, branch to a new handler if we detect a kpm VA.
2253	 *
2254	 * %g1 = 8K TSB pointer register (not used, clobbered)
2255	 * %g2 = tag access register (used)
2256	 * %g3 = faulting context id (used)
2257	 */
2258	.align	64
2259	ALTENTRY(sfmmu_kdtlb_miss)
2260	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2261	  nop
2262
2263	/* Gather some stats for kpm misses in the TLB. */
2264	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2265	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2266
2267	/*
2268	 * Get first TSB offset and look for 8K/64K/512K mapping
2269	 * using the 8K virtual page as the index.
2270	 *
2271	 * We patch the next set of instructions at run time;
2272	 * any changes here require sfmmu_patch_ktsb changes too.
2273	 */
2274dktsbbase:
2275	RUNTIME_PATCH_SETX(%g7, %g6)
2276	/* %g7 = contents of ktsb_base or ktsb_pbase */
2277
2278dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2279	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2280
2281	/*
2282	 * At this point %g1 is our index into the TSB.
2283	 * We just masked off enough bits of the VA depending
2284	 * on our TSB size code.
2285	 */
2286	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2287	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2288	cmp	%g6, %g4			! compare tag
2289	bne,pn	%xcc, dktsb4m_kpmcheck_small
2290	  add	%g7, %g1, %g1			/* form tsb ptr */
2291	TT_TRACE(trace_tsbhit)
2292	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2293	/* trapstat expects tte in %g5 */
2294	retry
2295
2296	/*
2297	 * If kpm is using large pages, the following instruction needs
2298	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2299	 * so that we will probe the 4M TSB regardless of the VA.  In
2300	 * the case kpm is using small pages, we know no large kernel
2301	 * mappings are located above 0x80000000.00000000 so we skip the
2302	 * probe as an optimization.
2303	 */
2304dktsb4m_kpmcheck_small:
2305	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2306	  /* delay slot safe, below */
2307
2308	/*
2309	 * Get second TSB offset and look for 4M mapping
2310	 * using 4M virtual page as the TSB index.
2311	 *
2312	 * Here:
2313	 * %g1 = 8K TSB pointer.  Don't squash it.
2314	 * %g2 = tag access register (we still need it)
2315	 */
2316	srlx	%g2, MMU_PAGESHIFT4M, %g3
2317
2318	/*
2319	 * We patch the next set of instructions at run time;
2320	 * any changes here require sfmmu_patch_ktsb changes too.
2321	 */
2322dktsb4mbase:
2323	RUNTIME_PATCH_SETX(%g7, %g6)
2324	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2325dktsb4m:
2326	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2327	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2328
2329	/*
2330	 * At this point %g3 is our index into the TSB.
2331	 * We just masked off enough bits of the VA depending
2332	 * on our TSB size code.
2333	 */
2334	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2335	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2336	cmp	%g6, %g4			! compare tag
2337
2338dktsb4m_tsbmiss:
2339	bne,pn	%xcc, dktsb4m_kpmcheck
2340	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2341	TT_TRACE(trace_tsbhit)
2342	/* we don't check TTE size here since we assume 4M TSB is separate */
2343	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2344	/* trapstat expects tte in %g5 */
2345	retry
2346
2347	/*
2348	 * So, we failed to find a valid TTE to match the faulting
2349	 * address in either TSB.  There are a few cases that could land
2350	 * us here:
2351	 *
2352	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2353	 *    to sfmmu_tsb_miss_tt to handle the miss.
2354	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2355	 *    4M TSB.  Let segkpm handle it.
2356	 *
2357	 * Note that we shouldn't land here in the case of a kpm VA when
2358	 * kpm_smallpages is active -- we handled that case earlier at
2359	 * dktsb4m_kpmcheck_small.
2360	 *
2361	 * At this point:
2362	 *  g1 = 8K-indexed primary TSB pointer
2363	 *  g2 = tag access register
2364	 *  g3 = 4M-indexed secondary TSB pointer
2365	 */
2366dktsb4m_kpmcheck:
2367	cmp	%g2, %g0
2368	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2369	  nop
2370	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2371	  nop
2372
2373#ifdef sun4v
2374	/*
2375	 * User instruction miss w/ single TSB.
2376	 * The first probe covers 8K, 64K, and 512K page sizes,
2377	 * because 64K and 512K mappings are replicated off 8K
2378	 * pointer.
2379	 *
2380	 * g1 = tsb8k pointer register
2381	 * g2 = tag access register
2382	 * g3 - g6 = scratch registers
2383	 * g7 = TSB tag to match
2384	 */
2385	.align	64
2386	ALTENTRY(sfmmu_uitlb_fastpath)
2387
2388	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2389	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2390	ba,pn	%xcc, sfmmu_tsb_miss_tt
2391	  mov	%g0, %g3
2392
2393	/*
2394	 * User data miss w/ single TSB.
2395	 * The first probe covers 8K, 64K, and 512K page sizes,
2396	 * because 64K and 512K mappings are replicated off 8K
2397	 * pointer.
2398	 *
2399	 * g1 = tsb8k pointer register
2400	 * g2 = tag access register
2401	 * g3 - g6 = scratch registers
2402	 * g7 = TSB tag to match
2403	 */
2404	.align 64
2405	ALTENTRY(sfmmu_udtlb_fastpath)
2406
2407	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2408	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2409	ba,pn	%xcc, sfmmu_tsb_miss_tt
2410	  mov	%g0, %g3
2411
2412	/*
2413	 * User instruction miss w/ multiple TSBs (sun4v).
2414	 * The first probe covers 8K, 64K, and 512K page sizes,
2415	 * because 64K and 512K mappings are replicated off 8K
2416	 * pointer.  Second probe covers 4M page size only.
2417	 *
2418	 * Just like sfmmu_udtlb_slowpath, except:
2419	 *   o Uses ASI_ITLB_IN
2420	 *   o checks for execute permission
2421	 *   o No ISM prediction.
2422	 *
2423	 * g1 = tsb8k pointer register
2424	 * g2 = tag access register
2425	 * g3 - g6 = scratch registers
2426	 * g7 = TSB tag to match
2427	 */
2428	.align	64
2429	ALTENTRY(sfmmu_uitlb_slowpath)
2430
2431	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2432	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2433	/* g4 - g5 = clobbered here */
2434
2435	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2436	/* g1 = first TSB pointer, g3 = second TSB pointer */
2437	srlx	%g2, TAG_VALO_SHIFT, %g7
2438	PROBE_2ND_ITSB(%g3, %g7)
2439	/* NOT REACHED */
2440
2441#else /* sun4v */
2442
2443	/*
2444	 * User instruction miss w/ multiple TSBs (sun4u).
2445	 * The first probe covers 8K, 64K, and 512K page sizes,
2446	 * because 64K and 512K mappings are replicated off 8K
2447	 * pointer.  Second probe covers 4M page size only.
2448	 *
2449	 * Just like sfmmu_udtlb_slowpath, except:
2450	 *   o Uses ASI_ITLB_IN
2451	 *   o checks for execute permission
2452	 *   o No ISM prediction.
2453	 *
2454	 * g1 = tsb8k pointer register
2455	 * g2 = tag access register
2456	 * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch
2457	 * g4 - g6 = scratch registers
2458	 * g7 = TSB tag to match
2459	 */
2460	.align	64
2461	ALTENTRY(sfmmu_uitlb_slowpath)
2462
2463#ifdef UTSB_PHYS
2464	/*
2465	 * g1 = 1st TSB entry pointer
2466	 * g3 = 2nd TSB base register
2467	 * Need 2nd TSB entry pointer for 2nd probe.
2468	 */
2469	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2470
2471	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2472#else /* UTSB_PHYS */
2473	mov	%g1, %g3	/* save tsb8k reg in %g3 */
2474	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
2475	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2476
2477	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
2478	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
2479	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
2480#endif /* UTSB_PHYS */
2481	/* g1 = first TSB pointer, g3 = second TSB pointer */
2482	srlx	%g2, TAG_VALO_SHIFT, %g7
2483	PROBE_2ND_ITSB(%g3, %g7, isynth)
2484	/* NOT REACHED */
2485#endif /* sun4v */
2486
2487	/*
2488	 * User data miss w/ multiple TSBs.
2489	 * The first probe covers 8K, 64K, and 512K page sizes,
2490	 * because 64K and 512K mappings are replicated off 8K
2491	 * pointer.  Second probe covers 4M page size only.
2492	 *
2493	 * We consider probing for 4M pages first if the VA falls
2494	 * in a range that's likely to be ISM.
2495	 *
2496	 * g1 = tsb8k pointer register
2497	 * g2 = tag access register
2498	 * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch
2499	 * g4 - g6 = scratch registers
2500	 * g7 = TSB tag to match
2501	 */
2502	.align 64
2503	ALTENTRY(sfmmu_udtlb_slowpath)
2504
2505	/*
2506	 * Check for ISM.  If it exists, look for 4M mappings in the second TSB
2507	 * first, then probe for other mappings in the first TSB if that fails.
2508	 */
2509	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
2510	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
2511	  mov	%g1, %g3
2512
2513udtlb_miss_probefirst:
2514	/*
2515	 * g1 = 8K TSB pointer register
2516	 * g2 = tag access register
2517	 * g3 = (potentially) second TSB entry ptr
2518	 * g6 = ism pred.
2519	 * g7 = vpg_4m
2520	 */
2521#ifdef sun4v
2522	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2523	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2524
2525	/*
2526	 * Here:
2527	 *   g1 = first TSB pointer
2528	 *   g2 = tag access reg
2529	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2530	 */
2531	brgz,pn	%g6, sfmmu_tsb_miss_tt
2532	  nop
2533#else /* sun4v */
2534#ifndef UTSB_PHYS
2535	mov	%g1, %g4
2536	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
2537#endif /* UTSB_PHYS */
2538	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2539
2540	/*
2541	 * Here:
2542	 *   g1 = first TSB pointer
2543	 *   g2 = tag access reg
2544	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2545	 */
2546	brgz,pn	%g6, sfmmu_tsb_miss_tt
2547	  nop
2548#ifndef UTSB_PHYS
2549	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
2550#endif /* UTSB_PHYS */
2551	/* fall through in 8K->4M probe order */
2552#endif /* sun4v */
2553
2554udtlb_miss_probesecond:
2555	/*
2556	 * Look in the second TSB for the TTE
2557	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
2558	 * g2 = tag access reg
2559	 * g3 = 8K TSB pointer register
2560	 * g6 = ism pred.
2561	 * g7 = vpg_4m
2562	 */
2563#ifdef sun4v
2564	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
2565	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2566	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
2567#else /* sun4v */
2568#ifdef UTSB_PHYS
2569	GET_2ND_TSBREG(%g3)
2570	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2571	/* tagacc (%g2) is okay, no need to reload, %g3 = second tsbe ptr */
2572#else /* UTSB_PHYS */
2573	mov	%g3, %g7
2574	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
2575	/* %g2 clobbered, %g3 =second tsbe ptr */
2576	mov	MMU_TAG_ACCESS, %g2
2577	ldxa	[%g2]ASI_DMMU, %g2
2578#endif /* UTSB_PHYS */
2579#endif /* sun4v */
2580
2581	srlx	%g2, TAG_VALO_SHIFT, %g7
2582	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2583	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
2584	brgz,pn	%g6, udtlb_miss_probefirst
2585	  nop
2586
2587	/* fall through to sfmmu_tsb_miss_tt */
2588
2589	ALTENTRY(sfmmu_tsb_miss_tt)
2590	TT_TRACE(trace_tsbmiss)
2591	/*
2592	 * We get here if there is a TSB miss OR a write protect trap.
2593	 *
2594	 * g1 = First TSB entry pointer
2595	 * g2 = tag access register
2596	 * g3 = 4M TSB entry pointer; NULL if no 2nd TSB
2597	 * g4 - g7 = scratch registers
2598	 */
2599
2600	ALTENTRY(sfmmu_tsb_miss)
2601
2602	/*
2603	 * If trapstat is running, we need to shift the %tpc and %tnpc to
2604	 * point to trapstat's TSB miss return code (note that trapstat
2605	 * itself will patch the correct offset to add).
2606	 */
2607	rdpr	%tl, %g7
2608	cmp	%g7, 1
2609	ble,pt	%xcc, 0f
2610	  sethi	%hi(KERNELBASE), %g6
2611	rdpr	%tpc, %g7
2612	or	%g6, %lo(KERNELBASE), %g6
2613	cmp	%g7, %g6
2614	bgeu,pt	%xcc, 0f
2615	/* delay slot safe */
2616
2617	ALTENTRY(tsbmiss_trapstat_patch_point)
2618	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
2619	wrpr	%g7, %tpc
2620	add	%g7, 4, %g7
2621	wrpr	%g7, %tnpc
26220:
2623	CPU_TSBMISS_AREA(%g6, %g7)
2624
2625	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save first tsb pointer */
2626	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save second tsb pointer */
2627
2628	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
2629	brz,a,pn %g3, 1f			/* skip ahead if kernel */
2630	  ldn	[%g6 + TSBMISS_KHATID], %g7
2631	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
2632	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
2633
2634	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
2635
2636	cmp	%g3, INVALID_CONTEXT
2637	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
2638	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
2639
2640	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
2641	/*
2642	 * The miss wasn't in an ISM segment.
2643	 *
2644	 * %g1 %g3, %g4, %g5, %g7 all clobbered
2645	 * %g2 = tag access (vaddr + ctx)
2646	 */
2647
2648	ba,pt	%icc, 2f
2649	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
2650
26511:
2652	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
2653	/*
2654	 * 8K and 64K hash.
2655	 */
26562:
2657
2658	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2659		MMU_PAGESHIFT64K, TTE64K, tsb_l8K, tsb_checktte,
2660		sfmmu_suspend_tl, tsb_512K)
2661	/* NOT REACHED */
2662
2663tsb_512K:
2664	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2665	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2666	brz,pn	%g5, 3f
2667	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2668	and	%g4, HAT_512K_FLAG, %g5
2669
2670	/*
2671	 * Note that there is a small window here where we may have
2672	 * a 512k page in the hash list but have not yet set HAT_512K_FLAG,
2673	 * so we will skip searching the 512k hash list.
2674	 * In this case we will end up in pagefault which will find
2675	 * the mapping and return.  So, in this instance we will end up
2676	 * spending a bit more time resolving this TSB miss, but it can
2677	 * only happen once per process and even then, the chances of that
2678	 * are very small, so it's not worth the extra overhead it would
2679	 * take to close this window.
2680	 */
2681	brz,pn	%g5, tsb_4M
2682	  nop
26833:
2684	/*
2685	 * 512K hash
2686	 */
2687
2688	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2689		MMU_PAGESHIFT512K, TTE512K, tsb_l512K, tsb_checktte,
2690		sfmmu_suspend_tl, tsb_4M)
2691	/* NOT REACHED */
2692
2693tsb_4M:
2694	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2695	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2696	brz,pn	%g5, 4f
2697	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2698	and	%g4, HAT_4M_FLAG, %g5
2699	brz,pn	%g5, tsb_32M
2700	  nop
27014:
2702	/*
2703	 * 4M hash
2704	 */
2705
2706	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2707		MMU_PAGESHIFT4M, TTE4M, tsb_l4M, tsb_checktte,
2708		sfmmu_suspend_tl, tsb_32M)
2709	/* NOT REACHED */
2710
2711tsb_32M:
2712#ifndef sun4v
2713	GET_CPU_IMPL(%g5)
2714	cmp	%g5, OLYMPUS_C_IMPL
2715	be,pn	%xcc, 0f
2716	  nop
2717	cmp	%g5, PANTHER_IMPL
2718	bne,pt	%xcc, tsb_pagefault
2719	  nop
2720#endif
2721
27220:
2723	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2724	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2725#ifdef sun4v
2726	brz,pn	%g5, 6f
2727#else
2728	brz,pn	%g5, tsb_pagefault
2729#endif
2730	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2731	and	%g4, HAT_32M_FLAG, %g5
2732	brz,pn	%g5, tsb_256M
2733	  nop
27345:
2735	/*
2736	 * 32M hash
2737	 */
2738
2739	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2740		MMU_PAGESHIFT32M, TTE32M, tsb_l32M, tsb_checktte,
2741		sfmmu_suspend_tl, tsb_256M)
2742	/* NOT REACHED */
2743
2744tsb_256M:
2745	lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2746	and	%g4, HAT_256M_FLAG, %g5
2747	brz,pn	%g5, tsb_pagefault
2748	  nop
27496:
2750	/*
2751	 * 256M hash
2752	 */
2753
2754	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2755	    MMU_PAGESHIFT256M, TTE256M, tsb_l256M, tsb_checktte,
2756	    sfmmu_suspend_tl, tsb_pagefault)
2757	/* NOT REACHED */
2758
2759tsb_checktte:
2760	/*
2761	 * g3 = tte
2762	 * g4 = tte pa
2763	 * g5 = tte va
2764	 * g6 = tsbmiss area
2765	 */
2766	brgez,pn %g3, tsb_pagefault	/* if tte invalid branch */
2767	  nop
2768
2769tsb_validtte:
2770	/*
2771	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
2772	 */
2773	rdpr	%tt, %g7
2774	cmp	%g7, FAST_PROT_TT
2775	bne,pt	%icc, 4f
2776	  nop
2777
2778	TTE_SET_REFMOD_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_refmod,
2779	    tsb_protfault)
2780
2781	rdpr	%tt, %g5
2782	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
2783	ba,pt	%xcc, tsb_update_tl1
2784	  nop
2785
27864:
2787	/*
2788	 * If ITLB miss check exec bit.
2789	 * If not set treat as invalid TTE.
2790	 */
2791	cmp     %g7, T_INSTR_MMU_MISS
2792	be,pn	%icc, 5f
2793	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
2794	cmp     %g7, FAST_IMMU_MISS_TT
2795	bne,pt %icc, 3f
2796	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
27975:
2798	bz,pn %icc, tsb_protfault
2799	  nop
2800
28013:
2802	/*
2803	 * Set reference bit if not already set
2804	 */
2805	TTE_SET_REF_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_ref)
2806
2807	/*
2808	 * Now, load into TSB/TLB.  At this point:
2809	 * g3 = tte
2810	 * g4 = patte
2811	 * g6 = tsbmiss area
2812	 */
2813	rdpr	%tt, %g5
2814#ifdef sun4v
2815	MMU_FAULT_STATUS_AREA(%g2)
2816	cmp	%g5, T_INSTR_MMU_MISS
2817	be,a,pt	%icc, 9f
2818	  nop
2819	cmp	%g5, FAST_IMMU_MISS_TT
2820	be,a,pt	%icc, 9f
2821	  nop
2822	add	%g2, MMFSA_D_, %g2
28239:
2824	ldx	[%g2 + MMFSA_CTX_], %g7
2825	sllx	%g7, TTARGET_CTX_SHIFT, %g7
2826	ldx	[%g2 + MMFSA_ADDR_], %g2
2827	srlx	%g2, TTARGET_VA_SHIFT, %g2
2828	or	%g2, %g7, %g2
2829#else
2830	cmp	%g5, FAST_IMMU_MISS_TT
2831	be,a,pt	%icc, tsb_update_tl1
2832	  ldxa	[%g0]ASI_IMMU, %g2
2833	ldxa	[%g0]ASI_DMMU, %g2
2834#endif
2835tsb_update_tl1:
2836	srlx	%g2, TTARGET_CTX_SHIFT, %g7
2837	brz,pn	%g7, tsb_kernel
2838#ifdef sun4v
2839	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
2840#else
2841	  srlx	%g3, TTE_SZ_SHFT, %g7
2842#endif
2843
2844tsb_user:
2845#ifdef sun4v
2846	cmp	%g7, TTE4M
2847	bge,pn	%icc, tsb_user4m
2848	  nop
2849#else /* sun4v */
2850	cmp	%g7, TTESZ_VALID | TTE4M
2851	be,pn	%icc, tsb_user4m
2852	  srlx	%g3, TTE_SZ2_SHFT, %g7
2853	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
2854#ifdef ITLB_32M_256M_SUPPORT
2855	bnz,pn	%icc, tsb_user4m
2856	  nop
2857#else /* ITLB_32M_256M_SUPPORT */
2858	bnz,a,pn %icc, tsb_user_pn_synth
2859	 cmp	%g5, FAST_IMMU_MISS_TT
2860#endif /* ITLB_32M_256M_SUPPORT */
2861#endif /* sun4v */
2862
2863tsb_user8k:
2864	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = first TSB ptr
2865
2866#ifndef UTSB_PHYS
2867	mov	ASI_N, %g7	! user TSBs accessed by VA
2868	mov	%g7, %asi
2869#endif /* UTSB_PHYS */
2870
2871	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 5)
2872
2873#ifdef sun4v
2874	cmp	%g5, T_INSTR_MMU_MISS
2875	be,a,pn	%xcc, 9f
2876	  mov	%g3, %g5
2877#endif /* sun4v */
2878	cmp	%g5, FAST_IMMU_MISS_TT
2879	be,pn	%xcc, 9f
2880	  mov	%g3, %g5
2881
2882	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2883	! trapstat wants TTE in %g5
2884	retry
28859:
2886	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2887	! trapstat wants TTE in %g5
2888	retry
2889
2890tsb_user4m:
2891	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 = tsbp */
28924:
2893	brz,pn	%g1, 5f	/* Check to see if we have 2nd TSB programmed */
2894	  nop
2895
2896#ifndef UTSB_PHYS
2897	mov	ASI_N, %g7	! user TSBs accessed by VA
2898	mov	%g7, %asi
2899#endif /* UTSB_PHYS */
2900
2901	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 6)
2902
29035:
2904#ifdef sun4v
2905	cmp	%g5, T_INSTR_MMU_MISS
2906	be,a,pn	%xcc, 9f
2907	  mov	%g3, %g5
2908#endif /* sun4v */
2909	cmp	%g5, FAST_IMMU_MISS_TT
2910	be,pn	%xcc, 9f
2911	  mov	%g3, %g5
2912
2913	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2914	! trapstat wants TTE in %g5
2915	retry
29169:
2917	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2918	! trapstat wants TTE in %g5
2919	retry
2920
2921#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
2922	/*
2923	 * Panther ITLB synthesis.
2924	 * The Panther 32M and 256M ITLB code simulates these two large page
2925	 * sizes with 4M pages, to provide support for programs, for example
2926	 * Java, that may copy instructions into a 32M or 256M data page and
2927	 * then execute them. The code below generates the 4M pfn bits and
2928	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
2929	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
2930	 * are ignored by the hardware.
2931	 *
2932	 * Now, load into TSB/TLB.  At this point:
2933	 * g2 = tagtarget
2934	 * g3 = tte
2935	 * g4 = patte
2936	 * g5 = tt
2937	 * g6 = tsbmiss area
2938	 */
2939tsb_user_pn_synth:
2940	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
2941	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
2942	bz,pn %icc, 4b				/* if not, been here before */
2943	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
2944	brz,a,pn %g1, 5f			/* no 2nd tsb */
2945	  mov	%g3, %g5
2946
2947	mov	MMU_TAG_ACCESS, %g7
2948	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
2949	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
2950
2951	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
2952	mov	%g7, %asi
2953	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 4) /* update TSB */
29545:
2955	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2956	retry
2957
2958tsb_user_itlb_synth:
2959	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 = tsbp */
2960
2961	mov	MMU_TAG_ACCESS, %g7
2962	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
2963	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
2964	brz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
2965	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
2966
2967	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
2968	mov	%g7, %asi
2969	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 6) /* update TSB */
29707:
2971	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
2972	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2973	retry
2974#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
2975
2976tsb_kernel:
2977#ifdef sun4v
2978	cmp	%g7, TTE4M
2979	bge,pn	%icc, 5f
2980#else
2981	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
2982	be,pn	%icc, 5f
2983#endif
2984	  nop
2985	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8k tsbptr
2986	ba,pt	%xcc, 6f
2987	  nop
29885:
2989	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4m tsbptr
2990	brz,pn	%g1, 3f		/* skip programming if 4m TSB ptr is NULL */
2991	  nop
29926:
2993#ifndef sun4v
2994tsb_kernel_patch_asi:
2995	or	%g0, RUNTIME_PATCH, %g6
2996	mov	%g6, %asi	! XXX avoid writing to %asi !!
2997#endif
2998	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 7)
29993:
3000#ifdef sun4v
3001	cmp	%g5, T_INSTR_MMU_MISS
3002	be,a,pn	%icc, 1f
3003	  mov	%g3, %g5			! trapstat wants TTE in %g5
3004#endif /* sun4v */
3005	cmp	%g5, FAST_IMMU_MISS_TT
3006	be,pn	%icc, 1f
3007	  mov	%g3, %g5			! trapstat wants TTE in %g5
3008	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3009	! trapstat wants TTE in %g5
3010	retry
30111:
3012	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3013	! trapstat wants TTE in %g5
3014	retry
3015
3016tsb_ism:
3017	/*
3018	 * This is an ISM [i|d]tlb miss.  We optimize for largest
3019	 * page size down to smallest.
3020	 *
3021	 * g2 = vaddr + ctx	aka tag access register
3022	 * g3 = ismmap->ism_seg
3023	 * g4 = physical address of ismmap->ism_sfmmu
3024	 * g6 = tsbmiss area
3025	 */
3026	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
3027	brz,a,pn %g7, ptl1_panic		/* if zero, panic */
3028	  mov	PTL1_BAD_ISM, %g1
3029						/* g5 = pa of imap_vb_shift */
3030	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3031	lduha	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
3032	srlx	%g3, %g4, %g3			/* clr size field */
3033	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
3034	sllx	%g3, %g4, %g3			/* g3 = ism vbase */
3035	and	%g2, %g1, %g4			/* g4 = ctx number */
3036	andn	%g2, %g1, %g1			/* g1 = tlb miss vaddr */
3037	sub	%g1, %g3, %g2			/* g2 = offset in ISM seg */
3038	or	%g2, %g4, %g2			/* g2 = tagacc (vaddr + ctx) */
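
	/*
	 * Net effect of the above (sketch): the miss address is rebased
	 * to an offset within the ISM segment before we hash with the
	 * ISM hatid:
	 *
	 *	g2 = ((va & ~TAGACC_CTX_MASK) - ism_vbase) | ctx;
	 */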
3039
3040	/*
3041	 * ISM pages are always locked down.
3042	 * If we can't find the tte then pagefault
3043	 * and let the spt segment driver resolve it.
3044	 *
3045	 * g2 = ISM vaddr (offset in ISM seg)
3046	 * g6 = tsb miss area
3047	 * g7 = ISM hatid
3048	 */
3049	sub	%g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5 /* pa of imap_hatflags */
3050	lduha	[%g5]ASI_MEM, %g4		/* g4 = imap_hatflags */
3051	and	%g4, HAT_4M_FLAG, %g5		/* isolate the 4M flag */
3052	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3053	  nop
3054
3055tsb_ism_32M:
3056	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3057	brz,pn	%g5, tsb_ism_256M
3058	  nop
3059
3060	/*
3061	 * 32M hash.
3062	 */
3063
3064	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT32M,
3065	    TTE32M, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3066	    tsb_ism_4M)
3067	/* NOT REACHED */
3068
3069tsb_ism_32M_found:
3070	brlz,pt %g3, tsb_validtte
3071	  nop
3072	ba,pt	%xcc, tsb_ism_4M
3073	  nop
3074
3075tsb_ism_256M:
3076	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3077	brz,a,pn %g5, ptl1_panic
3078	  mov	PTL1_BAD_ISM, %g1
3079
3080	/*
3081	 * 256M hash.
3082	 */
3083	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT256M,
3084	    TTE256M, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3085	    tsb_ism_4M)
3086
3087tsb_ism_256M_found:
3088	brlz,pt %g3, tsb_validtte
3089	  nop
3090
3091tsb_ism_4M:
3092	/*
3093	 * 4M hash.
3094	 */
3095	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT4M,
3096	    TTE4M, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3097	    tsb_ism_8K)
3098	/* NOT REACHED */
3099
3100tsb_ism_4M_found:
3101	brlz,pt %g3, tsb_validtte
3102	  nop
3103
3104tsb_ism_8K:
3105	/*
3106	 * 8K and 64K hash.
3107	 */
3108
3109	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT64K,
3110	    TTE64K, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3111	    tsb_pagefault)
3112	/* NOT REACHED */
3113
3114tsb_ism_8K_found:
3115	brlz,pt	%g3, tsb_validtte
3116	  nop
3117
3118tsb_pagefault:
3119	rdpr	%tt, %g7
3120	cmp	%g7, FAST_PROT_TT
3121	be,a,pn	%icc, tsb_protfault
3122	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3123
3124tsb_protfault:
3125	/*
3126	 * we get here if we couldn't find a valid tte in the hash.
3127	 *
3128	 * If user and we are at tl>1 we go to window handling code.
3129	 *
3130	 * If kernel and the fault is on the same page as our stack
3131	 * pointer, then we know the stack is bad and the trap handler
3132	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3133	 *
3134	 * If this is a kernel trap and tl>1, panic.
3135	 *
3136	 * Otherwise we call pagefault.
3137	 */
3138	cmp	%g7, FAST_IMMU_MISS_TT
3139#ifdef sun4v
3140	MMU_FAULT_STATUS_AREA(%g4)
3141	ldx	[%g4 + MMFSA_I_CTX], %g5
3142	ldx	[%g4 + MMFSA_D_CTX], %g4
3143	move	%icc, %g5, %g4
3144	cmp	%g7, T_INSTR_MMU_MISS
3145	move	%icc, %g5, %g4
3146#else
3147	mov	MMU_TAG_ACCESS, %g4
3148	ldxa	[%g4]ASI_DMMU, %g2
3149	ldxa	[%g4]ASI_IMMU, %g5
3150	move	%icc, %g5, %g2
3151	cmp	%g7, T_INSTR_MMU_MISS
3152	move	%icc, %g5, %g2
3153	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
3154#endif
3155	brnz,pn	%g4, 3f				/* skip if not kernel */
3156	  rdpr	%tl, %g5
3157
3158	add	%sp, STACK_BIAS, %g3
3159	srlx	%g3, MMU_PAGESHIFT, %g3
3160	srlx	%g2, MMU_PAGESHIFT, %g4
3161	cmp	%g3, %g4
3162	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
3163	  mov	PTL1_BAD_STACK, %g1
3164
3165	cmp	%g5, 1
3166	ble,pt	%icc, 2f
3167	  nop
3168	TSTAT_CHECK_TL1(2f, %g1, %g2)
3169	rdpr	%tt, %g2
3170	cmp	%g2, FAST_PROT_TT
3171	mov	PTL1_BAD_KPROT_FAULT, %g1
3172	movne	%icc, PTL1_BAD_KMISS, %g1
3173	ba,pt	%icc, ptl1_panic
3174	  nop
3175
31762:
3177	/*
3178	 * We are taking a pagefault in the kernel on a kernel address.  If
3179	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
3180	 * want to call sfmmu_pagefault -- we will instead note that a fault
3181	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
3182	 * (instead of a "retry").  This will step over the faulting
3183	 * instruction.
3184	 */
3185	CPU_INDEX(%g1, %g2)
3186	set	cpu_core, %g2
3187	sllx	%g1, CPU_CORE_SHIFT, %g1
3188	add	%g1, %g2, %g1
3189	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3190	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3191	bz	sfmmu_pagefault
3192	or	%g2, CPU_DTRACE_BADADDR, %g2
3193	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3194	GET_MMU_D_ADDR(%g3, %g4)
3195	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3196	done
3197
31983:
3199	cmp	%g5, 1
3200	ble,pt	%icc, 4f
3201	  nop
3202	TSTAT_CHECK_TL1(4f, %g1, %g2)
3203	ba,pt	%icc, sfmmu_window_trap
3204	  nop
3205
32064:
3207	/*
3208	 * We are taking a pagefault on a non-kernel address.  If we are in
3209	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3210	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3211	 */
3212	CPU_INDEX(%g1, %g2)
3213	set	cpu_core, %g2
3214	sllx	%g1, CPU_CORE_SHIFT, %g1
3215	add	%g1, %g2, %g1
3216	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3217	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3218	bz	sfmmu_pagefault
3219	or	%g2, CPU_DTRACE_BADADDR, %g2
3220	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3221	GET_MMU_D_ADDR(%g3, %g4)
3222	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3223
3224	/*
3225	 * Be sure that we're actually taking this miss from the kernel --
3226	 * otherwise we have managed to return to user-level with
3227	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3228	 */
3229	rdpr	%tstate, %g2
3230	btst	TSTATE_PRIV, %g2
3231	bz,a	ptl1_panic
3232	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3233	done
3234
3235	ALTENTRY(tsb_tl0_noctxt)
3236	/*
3237	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
3238	 * if it is, indicate that we have faulted and issue a done.
3239	 */
3240	CPU_INDEX(%g5, %g6)
3241	set	cpu_core, %g6
3242	sllx	%g5, CPU_CORE_SHIFT, %g5
3243	add	%g5, %g6, %g5
3244	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
3245	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
3246	bz	1f
3247	or	%g6, CPU_DTRACE_BADADDR, %g6
3248	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
3249	GET_MMU_D_ADDR(%g3, %g4)
3250	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
3251
3252	/*
3253	 * Be sure that we're actually taking this miss from the kernel --
3254	 * otherwise we have managed to return to user-level with
3255	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3256	 */
3257	rdpr	%tstate, %g5
3258	btst	TSTATE_PRIV, %g5
3259	bz,a	ptl1_panic
3260	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3261	done
3262
32631:
3264	rdpr	%tt, %g5
3265	cmp	%g5, FAST_IMMU_MISS_TT
3266#ifdef sun4v
3267	MMU_FAULT_STATUS_AREA(%g2)
3268	be,a,pt	%icc, 2f
3269	  ldx	[%g2 + MMFSA_I_CTX], %g3
3270	cmp	%g5, T_INSTR_MMU_MISS
3271	be,a,pt	%icc, 2f
3272	  ldx	[%g2 + MMFSA_I_CTX], %g3
3273	ldx	[%g2 + MMFSA_D_CTX], %g3
32742:
3275#else
3276	mov	MMU_TAG_ACCESS, %g2
3277	be,a,pt	%icc, 2f
3278	  ldxa	[%g2]ASI_IMMU, %g3
3279	ldxa	[%g2]ASI_DMMU, %g3
32802:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
3281#endif
3282	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
3283	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
3284	rdpr	%tl, %g5
3285	cmp	%g5, 1
3286	ble,pt	%icc, sfmmu_mmu_trap
3287	  nop
3288	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3289	ba,pt	%icc, sfmmu_window_trap
3290	  nop
3291	SET_SIZE(sfmmu_tsb_miss)
3292
3293#if (1 << TSBMISS_SHIFT) != TSBMISS_SIZE
3294#error - TSBMISS_SHIFT does not correspond to size of tsbmiss struct
3295#endif
3296
3297#endif /* lint */
3298
3299#if defined (lint)
3300/*
3301 * This routine will look for a user or kernel vaddr in the hash
3302 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
3303 * grab any locks.  It should only be used by other sfmmu routines.
3304 */
3305/* ARGSUSED */
3306pfn_t
3307sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3308{
3309	return(0);
3310}
3311
3312#else /* lint */
3313
3314	ENTRY_NP(sfmmu_vatopfn)
3315 	/*
3316 	 * save the current %pstate
3317 	 */
3318 	rdpr	%pstate, %o3
3319#ifdef DEBUG
3320	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
3321	bnz,pt	%icc, 1f			/* disabled, panic	 */
3322	  nop
3323
3324	sethi	%hi(panicstr), %g1
3325	ldx	[%g1 + %lo(panicstr)], %g1
3326	tst	%g1
3327	bnz,pt	%icc, 1f
3328	  nop
3329
3330	save	%sp, -SA(MINFRAME), %sp
3331	sethi	%hi(sfmmu_panic1), %o0
3332	call	panic
3333	 or	%o0, %lo(sfmmu_panic1), %o0
33341:
3335#endif
3336	/*
3337	 * disable interrupts to protect the TSBMISS area
3338	 */
3339	andn    %o3, PSTATE_IE, %o5
3340	wrpr    %o5, 0, %pstate
3341
3342	/*
3343	 * o0 = vaddr
3344	 * o1 = sfmmup
3345	 * o2 = ttep
3346	 */
3347	CPU_TSBMISS_AREA(%g1, %o5)
3348	ldn	[%g1 + TSBMISS_KHATID], %o4
3349	cmp	%o4, %o1
3350	bne,pn	%ncc, vatopfn_nokernel
3351	  mov	TTE64K, %g5			/* g5 = rehash # */
3352	mov %g1,%o5				/* o5 = tsbmiss_area */
3353	/*
3354	 * o0 = vaddr
3355	 * o1 & o4 = hatid
3356	 * o2 = ttep
3357	 * o5 = tsbmiss area
3358	 */
3359	mov	HBLK_RANGE_SHIFT, %g6
33601:
3361
3362	/*
3363	 * o0 = vaddr
3364	 * o1 = sfmmup
3365	 * o2 = ttep
3366	 * o3 = old %pstate
3367	 * o4 = hatid
3368	 * o5 = tsbmiss
3369	 * g5 = rehash #
3370	 * g6 = hmeshift
3371	 *
3372	 * The first arg to GET_TTE is actually the tag access register,
3373	 * not just vaddr.  Since this call is for the kernel we need to clear
3374	 * any lower vaddr bits that would be interpreted as ctx bits.
3375	 */
3376	set     TAGACC_CTX_MASK, %g1
3377	andn    %o0, %g1, %o0
3378	GET_TTE(%o0, %o4, %g1, %g2, %g3, %o5, %g4, %g6, %g5,
3379		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
3380
3381kvtop_hblk_found:
3382	/*
3383	 * o0 = vaddr
3384	 * o1 = sfmmup
3385	 * o2 = ttep
3386	 * g1 = tte
3387	 * g2 = tte pa
3388	 * g3 = tte va
3389	 * o5 = tsbmiss area
3390	 * o4 = hatid
3391	 */
3392	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
3393	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3394	stx %g1,[%o2]				/* put tte into *ttep */
3395	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
3396	/*
3397	 * o0 = vaddr
3398	 * o1 = sfmmup
3399	 * o2 = ttep
3400	 * g1 = pfn
3401	 */
3402	ba,pt	%xcc, 6f
3403	  mov	%g1, %o0
3404
3405kvtop_nohblk:
3406	/*
3407	 * we get here if we couldn't find a valid hblk in the hash.  We
3408	 * rehash if necessary.
3409	 */
3410	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
3411#ifdef sun4v
3412	cmp	%g5, MAX_HASHCNT
3413#else
3414	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
3415#endif
3416	be,a,pn	%icc, 6f
3417	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3418	mov	%o1, %o4			/* restore hatid */
3419#ifdef sun4v
3420	add	%g5, 2, %g5
3421	cmp	%g5, 3
3422	move	%icc, MMU_PAGESHIFT4M, %g6
3423	ba,pt	%icc, 1b
3424	movne	%icc, MMU_PAGESHIFT256M, %g6
3425#else
3426	inc	%g5
3427	cmp	%g5, 2
3428	move	%icc, MMU_PAGESHIFT512K, %g6
3429	ba,pt	%icc, 1b
3430	movne	%icc, MMU_PAGESHIFT4M, %g6
3431#endif
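
	/*
	 * Rehash progression implied above (sketch): %g5 is the rehash
	 * number and %g6 the hme shift, so sun4u probes the 64K, 512K and
	 * 4M hashes in turn, while sun4v probes 64K, 4M and then 256M.
	 */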
34326:
3433	retl
3434 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3435
3436tsb_suspend:
3437	/*
3438	 * o0 = vaddr
3439	 * o1 = sfmmup
3440	 * o2 = ttep
3441	 * g1 = tte
3442	 * g2 = tte pa
3443	 * g3 = tte va
3444	 * o5 = tsbmiss area (o2 still holds ttep)
3445	 */
3446	stx %g1,[%o2]				/* put tte into *ttep */
3447	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
3448	  sub	%g0, 1, %o0			/* output = -1 (PFN_INVALID) */
3449	TTETOPFN(%g1, %o0, vatopfn_l3, %g2, %g3, %g4)
3450	/*
3451	 * o0 = PFN return value PFN_INVALID, PFN_SUSPENDED, or pfn#
3452	 * o1 = sfmmup
3453	 * o2 = ttep
3454	 * g1 = pfn
3455	 */
3456	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
34578:
3458	retl
3459	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
3460
3461vatopfn_nokernel:
3462	/*
3463	 * This routine does NOT support user addresses.
3464	 * There is a routine in C that supports this.
3465	 * The only reason we don't have the C routine
3466	 * support kernel addresses as well is that
3467	 * we do va_to_pa while holding the hashlock.
3468	 */
3469 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3470	save	%sp, -SA(MINFRAME), %sp
3471	sethi	%hi(sfmmu_panic3), %o0
3472	call	panic
3473	 or	%o0, %lo(sfmmu_panic3), %o0
3474
3475	SET_SIZE(sfmmu_vatopfn)
3476#endif /* lint */
3477
3478
3479
3480#if !defined(lint)
3481
3482/*
3483 * kpm lock used between trap level tsbmiss handler and kpm C level.
3484 */
3485#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
3486	mov     0xff, tmp1						;\
3487label1:									;\
3488	casa    [kpmlckp]asi, %g0, tmp1					;\
3489	brnz,pn tmp1, label1						;\
3490	mov     0xff, tmp1						;\
3491	membar  #LoadLoad
3492
3493#define KPMLOCK_EXIT(kpmlckp, asi)					\
3494	membar  #LoadStore|#StoreStore					;\
3495	sta     %g0, [kpmlckp]asi
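
/*
 * In C terms (sketch): KPMLOCK_ENTER spins on the lock word with casa
 * and KPMLOCK_EXIT releases it, with the membars providing the required
 * acquire/release ordering:
 *
 *	while (atomic_cas_32(kpmlckp, 0, 0xff) != 0)
 *		;
 *	membar #LoadLoad;
 *	...critical section...
 *	membar #LoadStore|#StoreStore;
 *	*kpmlckp = 0;
 */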
3496
3497/*
3498 * Lookup a memseg for a given pfn and if found, return the physical
3499 * address of the corresponding struct memseg in mseg, otherwise
3500 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
3501 * tsbmp, %asi is assumed to be ASI_MEM.
3502 * This lookup is done by strictly traversing only the physical memseg
3503 * linkage. The more generic approach, to check the virtual linkage
3504 * before using the physical (used e.g. with hmehash buckets), cannot
3505 * be used here. Memory DR operations can run in parallel to this
3506 * lookup w/o any locks and updates of the physical and virtual linkage
3507 * cannot be done atomically with respect to each other.  Because physical
3508 * address zero can be a valid physical address, MSEG_NULLPTR_PA acts
3509 * as the "physical NULL" pointer.
3510 */
3511#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
3512	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
3513	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
3514	udivx	pfn, mseg, mseg						;\
3515	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
3516	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
3517	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
3518	add	tmp1, mseg, tmp1					;\
3519	ldxa	[tmp1]%asi, mseg					;\
3520	cmp	mseg, MSEG_NULLPTR_PA					;\
3521	be,pn	%xcc, label/**/1		/* if not found */	;\
3522	  nop								;\
3523	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3524	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3525	blu,pn	%xcc, label/**/1					;\
3526	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3527	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3528	bgeu,pn	%xcc, label/**/1					;\
3529	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
3530	mulx	tmp1, PAGE_SIZE, tmp1					;\
3531	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
3532	add	tmp2, tmp1, tmp1			/* pp */	;\
3533	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
3534	cmp	tmp2, pfn						;\
3535	be,pt	%xcc, label/**/_ok			/* found */	;\
3536label/**/1:								;\
3537	/* brute force lookup */					;\
3538	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
3539	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
3540label/**/2:								;\
3541	cmp	mseg, MSEG_NULLPTR_PA					;\
3542	be,pn	%xcc, label/**/_ok		/* if not found */	;\
3543	  nop								;\
3544	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3545	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3546	blu,a,pt %xcc, label/**/2					;\
3547	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3548	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3549	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3550	bgeu,a,pt %xcc, label/**/2					;\
3551	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3552label/**/_ok:
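
/*
 * The lookup above is roughly (sketch, using the physical-linkage
 * fields of struct memseg):
 *
 *	mseg = mhash[(pfn / mhash_per_slot) & (SFMMU_N_MEM_SLOTS - 1)];
 *	if (mseg == MSEG_NULLPTR_PA || pfn < mseg->pages_base ||
 *	    pfn >= mseg->pages_end ||
 *	    mseg->pages[pfn - mseg->pages_base].p_pagenum != pfn)
 *		fall back to a linear walk of the memsegspa list;
 */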
3553
3554	/*
3555	 * kpm tsb miss handler large pages
3556	 * g1 = 8K kpm TSB entry pointer
3557	 * g2 = tag access register
3558	 * g3 = 4M kpm TSB entry pointer
3559	 */
3560	ALTENTRY(sfmmu_kpm_dtsb_miss)
3561	TT_TRACE(trace_tsbmiss)
3562
3563	CPU_INDEX(%g7, %g6)
3564	sethi	%hi(kpmtsbm_area), %g6
3565	sllx	%g7, KPMTSBM_SHIFT, %g7
3566	or	%g6, %lo(kpmtsbm_area), %g6
3567	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
3568
3569	/* check enable flag */
3570	ldub	[%g6 + KPMTSBM_FLAGS], %g4
3571	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
3572	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
3573	  nop
3574
3575	/* VA range check */
3576	ldx	[%g6 + KPMTSBM_VBASE], %g7
3577	cmp	%g2, %g7
3578	blu,pn	%xcc, sfmmu_tsb_miss
3579	  ldx	[%g6 + KPMTSBM_VEND], %g5
3580	cmp	%g2, %g5
3581	bgeu,pn	%xcc, sfmmu_tsb_miss
3582	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
3583
3584	/*
3585	 * check TL tsbmiss handling flag
3586	 * bump tsbmiss counter
3587	 */
3588	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
3589#ifdef	DEBUG
3590	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
3591	inc	%g5
3592	brz,pn	%g3, sfmmu_kpm_exception
3593	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
3594#else
3595	inc	%g5
3596	st	%g5, [%g6 + KPMTSBM_TSBMISS]
3597#endif
3598	/*
3599	 * At this point:
3600	 *  g1 = 8K kpm TSB pointer (not used)
3601	 *  g2 = tag access register
3602	 *  g3 = clobbered
3603	 *  g6 = per-CPU kpm tsbmiss area
3604	 *  g7 = kpm_vbase
3605	 */
3606
3607	/* vaddr2pfn */
3608	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
3609	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
3610	srax    %g4, %g3, %g2			/* which alias range (r) */
3611	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
3612	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
3613
3614	/*
3615	 * Setup %asi
3616	 * mseg_pa = page_numtomemseg_nolock(pfn)
3617	 * if (mseg_pa == NULL) sfmmu_kpm_exception
3618	 * g2=pfn
3619	 */
3620	mov	ASI_MEM, %asi
3621	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
3622	cmp	%g3, MSEG_NULLPTR_PA
3623	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
3624	  nop
3625
3626	/*
3627	 * inx = ptokpmp(kpmptop(ptopkpmp(pfn)) - mseg_pa->kpm_pbase);
3628	 * g2=pfn g3=mseg_pa
3629	 */
3630	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
3631	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
3632	srlx	%g2, %g5, %g4
3633	sllx	%g4, %g5, %g4
3634	sub	%g4, %g7, %g4
3635	srlx	%g4, %g5, %g4
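	/*
	 * Expanded, with s = kpmp2pshft (log2 of the number of 8K pages
	 * per kpm large page), the shift sequence above is:
	 *
	 *	base = (pfn >> s) << s;		kpmptop(ptopkpmp(pfn))
	 *	inx = (base - mseg->kpm_pbase) >> s;	ptokpmp(...)
	 */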
3636
3637	/*
3638	 * Validate inx value
3639	 * g2=pfn g3=mseg_pa g4=inx
3640	 */
3641#ifdef	DEBUG
3642	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
3643	cmp	%g4, %g5			/* inx - nkpmpgs */
3644	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
3645	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3646#else
3647	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3648#endif
3649	/*
3650	 * kp = &mseg_pa->kpm_pages[inx]
3651	 */
3652	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
3653	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
3654	add	%g5, %g4, %g5			/* kp */
3655
3656	/*
3657	 * KPMP_HASH(kp)
3658	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
3659	 */
3660	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
3661	sub	%g7, 1, %g7			/* mask */
3662	srlx	%g5, %g1, %g1			/* x = kp >> kpmp_shift */
3663	add	%g5, %g1, %g5			/* y = kp + x */
3664	and 	%g5, %g7, %g5			/* hashinx = y & mask */
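	/*
	 * I.e., the hash computed above is:
	 *
	 *	hashinx = (kp + (kp >> kpmp_shift)) & (kpmp_table_sz - 1);
	 */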
3665
3666	/*
3667	 * Calculate physical kpm_page pointer
3668	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
3669	 */
3670	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
3671	add	%g1, %g4, %g1			/* kp_pa */
3672
3673	/*
3674	 * Calculate physical hash lock address
3675	 * g1=kp_pa g2=pfn g5=hashinx
3676	 */
3677	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
3678	sllx	%g5, KPMHLK_SHIFT, %g5
3679	add	%g4, %g5, %g3
3680	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
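	/*
	 * I.e., assuming KPMHLK_SHIFT is log2 of the hash-lock entry
	 * size and KPMHLK_LOCK the lock offset within an entry:
	 *
	 *	hlck_pa = kpmp_tablepa + (hashinx << KPMHLK_SHIFT) +
	 *	    KPMHLK_LOCK;
	 */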
3681
3682	/*
3683	 * Assemble tte
3684	 * g1=kp_pa g2=pfn g3=hlck_pa
3685	 */
3686#ifdef sun4v
3687	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
3688	sllx	%g5, 32, %g5
3689	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
3690	or	%g4, TTE4M, %g4
3691	or	%g5, %g4, %g5
3692#else
3693	sethi	%hi(TTE_VALID_INT), %g4
3694	mov	TTE4M, %g5
3695	sllx	%g5, TTE_SZ_SHFT_INT, %g5
3696	or	%g5, %g4, %g5			/* upper part */
3697	sllx	%g5, 32, %g5
3698	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
3699	or	%g5, %g4, %g5
3700#endif
3701	sllx	%g2, MMU_PAGESHIFT, %g4
3702	or	%g5, %g4, %g5			/* tte */
3703	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
3704	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3705
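	/*
	 * The TTE just assembled is, in C terms (sun4u layout shown;
	 * on sun4v the size code sits in the low word instead):
	 *
	 *	tte = ((uint64_t)(TTE_VALID_INT |
	 *	    (TTE4M << TTE_SZ_SHFT_INT)) << 32) |
	 *	    TTE_CP_INT | TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
	 *	    ((uint64_t)pfn << MMU_PAGESHIFT);
	 */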
3706	/*
3707	 * tsb dropin
3708	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
3709	 */
3710
3711	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
3712	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
3713
3714	/* fall back to the C-handler if there is no go for dropin */
3715	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
3716	cmp	%g7, -1
3717	bne,pn	%xcc, 5f		/* no go: kp_refcntc != -1 */
3718	  nop
3719
3720#ifdef	DEBUG
3721	/* double check refcnt */
3722	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
3723	brz,pn	%g7, 5f			/* let C-handler deal with this */
3724	  nop
3725#endif
3726
3727#ifndef sun4v
3728	ldub	[%g6 + KPMTSBM_FLAGS], %g7
3729	mov	ASI_N, %g1
3730	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
3731	movnz	%icc, ASI_MEM, %g1
3732	mov	%g1, %asi
3733#endif
3734
3735	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
3736	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
3737
3738	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
3739	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
3740
3741	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
3742
3743	/* KPMLOCK_EXIT(kpmlckp, asi) */
3744	KPMLOCK_EXIT(%g3, ASI_MEM)
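	/*
	 * In outline, the dropin sequence above is (helper names are
	 * illustrative stand-ins for the macros used):
	 *
	 *	kpmlock_enter(hlck_pa);
	 *	if (kp->kp_refcntc != -1) {
	 *		kpmlock_exit(hlck_pa);		label 5: below
	 *		return (sfmmu_kpm_exception());
	 *	}
	 *	tsb_lock_entry(tsbe);			TSB_LOCK_ENTRY
	 *	tsbe->tte = tte;			TSB_INSERT_UNLOCK_
	 *	tsbe->tag = ttarget;			  ENTRY (tag unlocks)
	 *	dtlb_stuff(tte);			DTLB_STUFF
	 *	kpmlock_exit(hlck_pa);
	 */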
3745
3746	/*
3747	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3748	 * point to trapstat's TSB miss return code (note that trapstat
3749	 * itself will patch the correct offset to add).
3750	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
3751	 */
3752	rdpr	%tl, %g7
3753	cmp	%g7, 1
3754	ble	%icc, 0f
3755	sethi	%hi(KERNELBASE), %g6
3756	rdpr	%tpc, %g7
3757	or	%g6, %lo(KERNELBASE), %g6
3758	cmp	%g7, %g6
3759	bgeu	%xcc, 0f
3760	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
3761	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3762	wrpr	%g7, %tpc
3763	add	%g7, 4, %g7
3764	wrpr	%g7, %tnpc
37650:
3766	retry
37675:
3768	/* g3=hlck_pa */
3769	KPMLOCK_EXIT(%g3, ASI_MEM)
3770	ba,pt	%icc, sfmmu_kpm_exception
3771	  nop
3772	SET_SIZE(sfmmu_kpm_dtsb_miss)
3773
3774	/*
3775	 * kpm tsbmiss handler for small pages
3776	 * g1 = 8K kpm TSB pointer
3777	 * g2 = tag access register
3778	 * g3 = 4M kpm TSB pointer
3779	 */
3780	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
3781	TT_TRACE(trace_tsbmiss)
3782	CPU_INDEX(%g7, %g6)
3783	sethi	%hi(kpmtsbm_area), %g6
3784	sllx	%g7, KPMTSBM_SHIFT, %g7
3785	or	%g6, %lo(kpmtsbm_area), %g6
3786	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
3787
3788	/* check enable flag */
3789	ldub	[%g6 + KPMTSBM_FLAGS], %g4
3790	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
3791	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
3792	  nop
3793
3794	/*
3795	 * VA range check
3796	 * On fail: goto sfmmu_tsb_miss
3797	 */
3798	ldx	[%g6 + KPMTSBM_VBASE], %g7
3799	cmp	%g2, %g7
3800	blu,pn	%xcc, sfmmu_tsb_miss
3801	  ldx	[%g6 + KPMTSBM_VEND], %g5
3802	cmp	%g2, %g5
3803	bgeu,pn	%xcc, sfmmu_tsb_miss
3804	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
3805
3806	/*
3807	 * check TL tsbmiss handling flag
3808	 * bump tsbmiss counter
3809	 */
3810	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
3811#ifdef	DEBUG
3812	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
3813	inc	%g5
3814	brz,pn	%g1, sfmmu_kpm_exception
3815	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
3816#else
3817	inc	%g5
3818	st	%g5, [%g6 + KPMTSBM_TSBMISS]
3819#endif
3820	/*
3821	 * At this point:
3822	 *  g1 = clobbered
3823	 *  g2 = tag access register
3824	 *  g3 = 4M kpm TSB pointer (not used)
3825	 *  g6 = per-CPU kpm tsbmiss area
3826	 *  g7 = kpm_vbase
3827	 */
3828
3829	/* vaddr2pfn */
3830	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
3831	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
3832	srax    %g4, %g3, %g2			/* which alias range (r) */
3833	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
3834	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
3835
3836	/*
3837	 * Setup %asi
3838	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
3839	 * if (mseg not found) sfmmu_kpm_exception
3840	 * g2=pfn
3841	 */
3842	mov	ASI_MEM, %asi
3843	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
3844	cmp	%g3, MSEG_NULLPTR_PA
3845	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
3846	  nop
3847
3848	/*
3849	 * inx = pfn - mseg_pa->kpm_pbase
3850	 * g2=pfn g3=mseg_pa
3851	 */
3852	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
3853	sub	%g2, %g7, %g4
3854
3855#ifdef	DEBUG
3856	/*
3857	 * Validate inx value
3858	 * g2=pfn g3=mseg_pa g4=inx
3859	 */
3860	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
3861	cmp	%g4, %g5			/* inx - nkpmpgs */
3862	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
3863	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3864#else
3865	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3866#endif
3867	/* ksp = &mseg_pa->kpm_spages[inx] */
3868	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
3869	add	%g5, %g4, %g5			/* ksp */
3870
3871	/*
3872	 * KPMP_SHASH(ksp)
3873	 * g2=pfn g3=mseg_pa g4=inx g5=ksp g7=kpmp_stable_sz
3874	 */
3875	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
3876	sub	%g7, 1, %g7			/* mask */
3877	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
3878	add	%g5, %g1, %g5			/* y = ksp + x */
3879	and 	%g5, %g7, %g5			/* hashinx = y & mask */
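	/*
	 * Note the shift direction: KPMP_SHASH() shifts the pointer
	 * left where KPMP_HASH() shifts it right, i.e.:
	 *
	 *	hashinx = (ksp + (ksp << kpmp_shift)) &
	 *	    (kpmp_stable_sz - 1);
	 */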
3880
3881	/*
3882	 * Calculate physical kpm_spage pointer
3883	 * g2=pfn g3=mseg_pa g4=inx g5=hashinx
3884	 */
3885	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
3886	add	%g1, %g4, %g1			/* ksp_pa */
3887
3888	/*
3889	 * Calculate physical hash lock address.
3890	 * Note: Changes in kpm_shlk_t must be reflected here.
3891	 * g1=ksp_pa g2=pfn g5=hashinx
3892	 */
3893	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
3894	sllx	%g5, KPMSHLK_SHIFT, %g5
3895	add	%g4, %g5, %g3			/* hlck_pa */
3896
3897	/*
3898	 * Assemble tte
3899	 * g1=ksp_pa g2=pfn g3=hlck_pa
3900	 */
3901	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
3902	sllx	%g5, 32, %g5
3903	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
3904	or	%g5, %g4, %g5
3905	sllx	%g2, MMU_PAGESHIFT, %g4
3906	or	%g5, %g4, %g5			/* tte */
3907	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
3908	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3909
3910	/*
3911	 * tsb dropin
3912	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte
3913	 */
3914
3915	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
3916	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
3917
3918	/* fall back to the C-handler if there is no go for dropin */
3919	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
3920	cmp	%g7, -1
3921	bne,pn	%xcc, 5f
3922	  nop
3923
3924#ifndef sun4v
3925	ldub	[%g6 + KPMTSBM_FLAGS], %g7
3926	mov	ASI_N, %g1
3927	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
3928	movnz	%icc, ASI_MEM, %g1
3929	mov	%g1, %asi
3930#endif
3931
3932	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
3933	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
3934
3935	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
3936	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
3937
3938	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
3939
3940	/* KPMLOCK_EXIT(kpmlckp, asi) */
3941	KPMLOCK_EXIT(%g3, ASI_MEM)
3942
3943	/*
3944	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3945	 * point to trapstat's TSB miss return code (note that trapstat
3946	 * itself will patch the correct offset to add).
3947	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
3948	 */
3949	rdpr	%tl, %g7
3950	cmp	%g7, 1
3951	ble	%icc, 0f
3952	sethi	%hi(KERNELBASE), %g6
3953	rdpr	%tpc, %g7
3954	or	%g6, %lo(KERNELBASE), %g6
3955	cmp	%g7, %g6
3956	bgeu	%xcc, 0f
3957	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
3958	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3959	wrpr	%g7, %tpc
3960	add	%g7, 4, %g7
3961	wrpr	%g7, %tnpc
39620:
3963	retry
39645:
3965	/* g3=hlck_pa */
3966	KPMLOCK_EXIT(%g3, ASI_MEM)
3967	ba,pt	%icc, sfmmu_kpm_exception
3968	  nop
3969	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
3970
3971#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
3972#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
3973#endif
3974
3975#endif /* lint */
3976
3977#ifdef	lint
3978/*
3979 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
3980 * Called from C-level; sets/clears the "go" indication for the trap
3981 * level handler.  khl_lock is a low-level spin lock protecting the
3982 * kp_refcntc field.  &kp->kp_refcntc is assumed to be checked for zero
3983 * or -1 at C-level, and khl_mutex is assumed to be held when called.
3984 */
3985/* ARGSUSED */
3986void
3987sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
3988{
3989}
3990
3991/*
3992 * For kpm_smallpages: stores val to the byte at address 'mapped'
3993 * within low-level lock brackets; the old value is returned.
3994 * Called from C-level.
3995 */
3996/* ARGSUSED */
3997int
3998sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
3999{
4000	return (0);
4001}
4002
4003#else /* lint */
4004
4005	.seg	".data"
4006sfmmu_kpm_tsbmtl_panic:
4007	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
4008	.byte	0
4009sfmmu_kpm_stsbmtl_panic:
4010	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
4011	.byte	0
4012	.align	4
4013	.seg	".text"
4014
4015	ENTRY_NP(sfmmu_kpm_tsbmtl)
4016	rdpr	%pstate, %o3
4017	/*
4018	 * %o0 = &kp_refcntc
4019	 * %o1 = &khl_lock
4020	 * %o2 = 0/1 (off/on)
4021	 * %o3 = pstate save
4022	 */
4023#ifdef DEBUG
4024	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4025	bnz,pt %icc, 1f				/* disabled, panic	 */
4026	  nop
4027	save	%sp, -SA(MINFRAME), %sp
4028	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
4029	call	panic
4030	  or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4031	ret
4032	restore
40331:
4034#endif /* DEBUG */
4035	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4036
4037	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4038	mov	-1, %o5
4039	brz,a	%o2, 2f
4040	  mov	0, %o5
40412:
4042	sth	%o5, [%o0]
4043	KPMLOCK_EXIT(%o1, ASI_N)
4044
4045	retl
4046	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4047	SET_SIZE(sfmmu_kpm_tsbmtl)
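	/*
	 * Equivalent C, for reference; disable_interrupts(),
	 * restore_interrupts() and kpmlock_enter/exit() are illustrative
	 * stand-ins for the pstate writes and KPMLOCK_* macros above:
	 *
	 *	void
	 *	sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
	 *	{
	 *		uint64_t pstate = disable_interrupts();
	 *
	 *		kpmlock_enter(khl_lock);
	 *		*kp_refcntc = (cmd != 0) ? -1 : 0;
	 *		kpmlock_exit(khl_lock);
	 *		restore_interrupts(pstate);
	 *	}
	 */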
4048
4049	ENTRY_NP(sfmmu_kpm_stsbmtl)
4050	rdpr	%pstate, %o3
4051	/*
4052	 * %o0 = &mapped
4053	 * %o1 = &kshl_lock
4054	 * %o2 = val
4055	 * %o3 = pstate save
4056	 */
4057#ifdef DEBUG
4058	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4059	bnz,pt %icc, 1f				/* disabled, panic	 */
4060	  nop
4061	save	%sp, -SA(MINFRAME), %sp
4062	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
4063	call	panic
4064	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4065	ret
4066	restore
40671:
4068#endif /* DEBUG */
4069	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4070
4071	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4072	ldsb	[%o0], %o5
4073	stb	%o2, [%o0]
4074	KPMLOCK_EXIT(%o1, ASI_N)
4075
4076	mov	%o5, %o0			/* return old val */
4077	retl
4078	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4079	SET_SIZE(sfmmu_kpm_stsbmtl)
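	/*
	 * Equivalent C, with the same illustrative helpers as above:
	 *
	 *	int
	 *	sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
	 *	{
	 *		uint64_t pstate = disable_interrupts();
	 *		int prev;
	 *
	 *		kpmlock_enter(kshl_lock);
	 *		prev = *mapped;
	 *		*mapped = val;
	 *		kpmlock_exit(kshl_lock);
	 *		restore_interrupts(pstate);
	 *		return (prev);
	 *	}
	 */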
4080
4081#endif /* lint */
4082
4083#ifndef lint
4084#ifdef sun4v
4085	/*
4086	 * User/kernel data miss w/ multiple TSBs
4087	 * The first probe covers 8K, 64K, and 512K page sizes,
4088	 * because 64K and 512K mappings are replicated off the 8K
4089	 * pointer.  The second probe covers the 4M page size only.
4090	 *
4091	 * MMU fault area contains miss address and context.
4092	 */
4093	ALTENTRY(sfmmu_slow_dmmu_miss)
4094	GET_MMU_D_TAGACC_CTX(%g2, %g3)	! %g2 = tagacc, %g3 = ctx
4095
4096slow_miss_common:
4097	/*
4098	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4099	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
4100	 */
4101	brnz,pt	%g3, 8f			! check for user context
4102	  nop
4103
4104	/*
4105	 * Kernel miss
4106	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
4107	 * branch to sfmmu_tsb_miss_tt to handle it.
4108	 */
4109	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4110sfmmu_dslow_patch_ktsb_base:
4111	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
4112sfmmu_dslow_patch_ktsb_szcode:
4113	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
4114
4115	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
4116	! %g1 = First TSB entry pointer, as TSB miss handler expects
4117
4118	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4119sfmmu_dslow_patch_ktsb4m_base:
4120	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
4121sfmmu_dslow_patch_ktsb4m_szcode:
4122	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
4123
4124	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
4125	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
4126	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4127	.empty
4128
41298:
4130	/*
4131	 * User miss
4132	 * Get first TSB pointer in %g1
4133	 * Get second TSB pointer (or NULL if no second TSB) in %g3
4134	 * Branch to sfmmu_tsb_miss_tt to handle it
4135	 */
4136	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
4137	/* %g1 = first TSB entry ptr now, %g2 preserved */
4138
4139	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
4140	brlz,a,pt %g3, sfmmu_tsb_miss_tt	/* done if no 2nd TSB */
4141	  mov	%g0, %g3
4142
4143	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
4144	/* %g3 = second TSB entry ptr now, %g2 preserved */
41459:
4146	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4147	.empty
4148	SET_SIZE(sfmmu_slow_dmmu_miss)
4149
4150
4151	/*
4152	 * User/kernel instruction miss w/ multiple TSBs
4153	 * The first probe covers 8K, 64K, and 512K page sizes,
4154	 * because 64K and 512K mappings are replicated off the 8K
4155	 * pointer.  The second probe covers the 4M page size only.
4156	 *
4157	 * MMU fault area contains miss address and context.
4158	 */
4159	ALTENTRY(sfmmu_slow_immu_miss)
4160	MMU_FAULT_STATUS_AREA(%g2)
4161	ldx	[%g2 + MMFSA_I_CTX], %g3
4162	ldx	[%g2 + MMFSA_I_ADDR], %g2
4163	srlx	%g2, MMU_PAGESHIFT, %g2	! align address to page boundary
4164	sllx	%g2, MMU_PAGESHIFT, %g2
4165	ba,pt	%xcc, slow_miss_common
4166	or	%g2, %g3, %g2
4167	SET_SIZE(sfmmu_slow_immu_miss)
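	/*
	 * I.e., the tag access value handed to slow_miss_common is the
	 * page-aligned miss address with the context in the low bits:
	 *
	 *	tagacc = (addr & ~MMU_PAGEOFFSET) | ctx;
	 */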
4168
4169#endif /* sun4v */
4170#endif	/* lint */
4171
4172#ifndef lint
4173
4174/*
4175 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4176 */
4177	.seg	".data"
4178	.align	64
4179	.global tsbmiss_area
4180tsbmiss_area:
4181	.skip	(TSBMISS_SIZE * NCPU)
4182
4183	.align	64
4184	.global kpmtsbm_area
4185kpmtsbm_area:
4186	.skip	(KPMTSBM_SIZE * NCPU)
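
/*
 * In C terms these are per-CPU arrays (struct names illustrative):
 *
 *	struct tsbmiss tsbmiss_area[NCPU];	64-byte aligned
 *	struct kpmtsbm kpmtsbm_area[NCPU];	64-byte aligned
 *
 * The alignment keeps each CPU's slot on its own cache line; a handler
 * locates its slot as &area[CPU->cpu_id], which is what the
 * CPU_INDEX/KPMTSBM_SHIFT arithmetic in the miss handlers computes.
 */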
4187#endif	/* lint */
4188