/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#ifndef	lint

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */

#ifndef	lint

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBE_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc 				;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */

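/*
 * Illustrative only (not assembled): a minimal C sketch of the index
 * arithmetic GET_TSBE_POINTER performs, treating the TSB as a
 * direct-mapped, virtually-indexed cache.  The function name and
 * typing are hypothetical; TSB_ENTRIES and TSB_ENTRY_SHIFT are the
 * real header constants.
 *
 *	uint64_t
 *	get_tsbe_ptr(uint64_t tsbbase, uint64_t tagacc, int vpshift, int szc)
 *	{
 *		uint64_t nent = TSB_ENTRIES(0) << szc;	// nentries in TSB
 *		uint64_t mask = nent - 1;		// power-of-2 mask
 *		uint64_t ent = (tagacc >> vpshift) & mask; // virtpage & mask
 *		return (tsbbase + (ent << TSB_ENTRY_SHIFT)); // entry -> ptr
 *	}
 */
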
/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)

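/*
 * Illustrative only (not assembled): the TSB selection done by
 * GET_KPM_TSBE_POINTER, reusing the hypothetical get_tsbe_ptr()
 * sketch from the previous comment.
 *
 *	uint64_t
 *	get_kpm_tsbe_ptr(uint64_t vaddr, int vpshift)
 *	{
 *		uint64_t base;
 *		int szc;
 *
 *		if (vpshift == MMU_PAGESHIFT) {		// small page case
 *			base = kpmsm_tsbbase;
 *			szc = kpmsm_tsbsz;
 *		} else {				// large page case
 *			base = kpm_tsbbase;
 *			szc = kpm_tsbsz;
 *		}
 *		return (get_tsbe_ptr(base, vaddr, vpshift, szc));
 *	}
 */
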
/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but is really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#endif /* UTSB_PHYS */

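/*
 * Illustrative only (not assembled): a minimal C sketch of the lock
 * protocol above.  cas32() is a hypothetical stand-in for the casa
 * instruction: it atomically compares *p with the old value and, on a
 * match, stores the new value, always returning the prior contents.
 *
 *	int
 *	tsb_lock_entry(uint32_t *taghi)		// high word of tsbe tag
 *	{
 *		uint32_t old = *taghi;
 *
 *		if (old == TSBTAG_LOCKED)	// already locked: ignore
 *			return (0);
 *		if (cas32(taghi, old, TSBTAG_LOCKED) != old)
 *			return (0);		// didn't lock so ignore
 *		membar_storestore();		// tsbe lock acquired
 *		return (1);
 *	}
 */
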
/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar #StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar #StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte.  TSB invalidate will spin until the entry is unlocked.	;\
	 * Note, we always invalidate the hash table before we unload	;\
	 * the TSB.							;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	TTE_CLR_SOFTEXEC_ML(tte)					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte.  TSB invalidate will spin until the entry is unlocked.	;\
	 * Note, we always invalidate the hash table before we unload	;\
	 * the TSB.							;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte.  TSB invalidate will spin until the entry is unlocked.	;\
	 * Note, we always invalidate the hash table before we unload	;\
	 * the TSB.  OR in the 4M pfn offset to the TTE and set the	;\
	 * exec_perm bit to 0 and the exec_synth bit to 1.		;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1						;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte				;\
	or	tte, TTE_E_SYNTH_INT, tte				;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp1 = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn

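/*
 * Worked example for GET_4M_PFN_OFF: a 32M page spans eight 4M
 * regions, so after the srlx by MMU_PAGESHIFT4M the low three bits
 * (mask 0x7) of the shifted tag access select the 4M region within
 * the page; a 256M page spans sixty-four 4M regions, so six bits
 * (mask 0x3f) are kept instead.  The final sllx moves the selected
 * bits back up by MMU_PAGESHIFT4M so they can be OR-ed into the
 * synthesized TTE as a pfn offset.
 */
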
/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp1 = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits.					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TTE_CLR_SOFTEXEC_ML(tteva)					;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

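/*
 * Illustrative only (not assembled): the overall TL=0 update protocol
 * in C form.  tsb_lock_entry() is the hypothetical sketch following
 * TSB_LOCK_ENTRY above, and the struct mirrors the TSBE_TAG/TSBE_TTE
 * layout (TSBE_TAG is checked to be 0 at compile time).
 *
 *	struct tsbe { uint64_t tag; uint64_t tte; };
 *
 *	void
 *	tsb_update(struct tsbe *tsbep, uint64_t *tteva, uint64_t tagtarget)
 *	{
 *		uint64_t tte = *tteva;	// read before locking: can tlb miss
 *
 *		if (!tsb_lock_entry((uint32_t *)&tsbep->tag))
 *			return;				// locked; ignore
 *		if ((int64_t)tte >= 0) {		// valid bit 63 clear
 *			*(uint32_t *)&tsbep->tag = TSBTAG_INVALID; // unlock
 *			return;
 *		}
 *		tsbep->tte = tte;			// write tte data
 *		membar_storestore();			// data before tag
 *		tsbep->tag = tagtarget;			// write tag & unlock
 *	}
 */
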
/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 *	 about this earlier to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */

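/*
 * Illustrative only (not assembled): the invalidate loop above in C
 * form; cas32() is the same hypothetical casa stand-in used in the
 * TSB_LOCK_ENTRY sketch.
 *
 *	void
 *	tsb_invalidate(struct tsbe *tsbep, uint64_t tag)
 *	{
 *		uint32_t *taghi = (uint32_t *)&tsbep->tag;
 *		uint32_t old = *taghi;
 *
 *		for (;;) {
 *			while (old == TSBTAG_LOCKED)	// locked: loop
 *				old = *taghi;		// until unlocked
 *			if (tsbep->tag != tag)		// different tag:
 *				return;			// do nothing
 *			if (cas32(taghi, old, TSBTAG_INVALID) == old)
 *				return;			// tag now invalid
 *			old = *taghi;			// start over
 *		}
 *	}
 */
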
#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */

#endif /* lint */


#if defined (lint)

/*
 * sfmmu related subroutines
 */
uint_t
sfmmu_disable_intrs()
{ return(0); }

/* ARGSUSED */
void
sfmmu_enable_intrs(uint_t pstate_save)
{}

/* ARGSUSED */
int
sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
{ return(0); }

/*
 * Use cas; if the tte has changed underneath us, reread it and try
 * again.  In the case of a retry, it will update sttep with the new
 * original.
 */
/* ARGSUSED */
int
sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/*
 * Use cas; if the tte has changed underneath us, return 1, else return 0.
 */
/* ARGSUSED */
int
sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/* ARGSUSED */
void
sfmmu_copytte(tte_t *sttep, tte_t *dttep)
{}

/*ARGSUSED*/
struct tsbe *
sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
{ return(0); }

/*ARGSUSED*/
uint64_t
sfmmu_make_tsbtag(caddr_t va)
{ return(0); }

#else	/* lint */

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"

	.global	sfmmu_panic6
sfmmu_panic6:
	.asciz	"sfmmu_asm: interrupts not disabled"

	.global	sfmmu_panic7
sfmmu_panic7:
	.asciz	"sfmmu_asm: kernel as"

	.global	sfmmu_panic8
sfmmu_panic8:
	.asciz	"sfmmu_asm: gnum is zero"

	.global	sfmmu_panic9
sfmmu_panic9:
	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"

	.global	sfmmu_panic10
sfmmu_panic10:
	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"

	.global	sfmmu_panic11
sfmmu_panic11:
	.asciz	"sfmmu_asm: ktsb_phys must not be 0 on a sun4v platform"

        ENTRY(sfmmu_disable_intrs)
        rdpr    %pstate, %o0
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
#endif /* DEBUG */
        retl
          wrpr   %o0, PSTATE_IE, %pstate
        SET_SIZE(sfmmu_disable_intrs)

	ENTRY(sfmmu_enable_intrs)
        retl
          wrpr    %g0, %o0, %pstate
        SET_SIZE(sfmmu_enable_intrs)

/*
 * This routine is called both by resume() and sfmmu_get_ctx() to
 * allocate a new context for the process on an MMU.
 * If allocflag == 1, allocate the ctx when the HAT mmu cnum == INVALID.
 * If allocflag == 0, do not allocate the ctx if the HAT mmu cnum ==
 * INVALID, which is the case when sfmmu_alloc_ctx is called from resume().
 *
 * The caller must disable interrupts before entering this routine.
 * To reduce ctx switch overhead, the code contains both 'fast path' and
 * 'slow path' code. The fast path code covers the common case where only
 * a quick check is needed and the real ctx allocation is not required.
 * It can be done without holding the per-process (PP) lock.
 * The 'slow path' code must be protected by the PP lock and performs ctx
 * allocation.
 * The hardware context register and the HAT mmu cnum are updated
 * accordingly.
 *
 * %o0 - sfmmup
 * %o1 - allocflag
 * %o2 - CPU
 * %o3 - sfmmu private/shared flag
 *
 * ret - 0: no ctx is allocated
 *       1: a ctx is allocated
 */
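/*
 * Illustrative only (not assembled): the lock-free fast path below in
 * C form.  The helper and field names are hypothetical; the real gnum
 * and cnum are packed in one word and fetched together by
 * SFMMU_MMUID_GNUM_CNUM.  (On the "done" paths the code below still
 * programs the secondary context register before returning.)
 *
 *	unpack_gnum_cnum(&sfmmup->sfmmu_ctxs[mmuid], &gnum, &cnum);
 *	if (cnum == INVALID_CONTEXT) {
 *		if (allocflag == 0)
 *			ret = 0;	// called from resume(): done
 *		else
 *			goto slowpath;	// allocate under the PP lock
 *	} else if (gnum == mmu_ctxp->gnum) {
 *		ret = 1;		// ctx is still valid: done
 *	} else {
 *		goto slowpath;		// stale gnum: reallocate
 *	}
 */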
        ENTRY_NP(sfmmu_alloc_ctx)

#ifdef DEBUG
	sethi   %hi(ksfmmup), %g1
	ldx     [%g1 + %lo(ksfmmup)], %g1
	cmp     %g1, %o0
	bne,pt   %xcc, 0f
	  nop

	sethi   %hi(panicstr), %g1		! if kernel as, panic
        ldx     [%g1 + %lo(panicstr)], %g1
        tst     %g1
        bnz,pn  %icc, 7f
          nop

	sethi	%hi(sfmmu_panic7), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic7), %o0

7:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0

0:
	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
#endif /* DEBUG */

	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1

	! load global mmu_ctxp info
	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr
        lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index

	! load global mmu_ctxp gnum
	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum

#ifdef DEBUG
	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
	bne,pt	%xcc, 3f
	  nop

	sethi   %hi(panicstr), %g1	! test if panicstr is already set
        ldx     [%g1 + %lo(panicstr)], %g1
        tst     %g1
        bnz,pn  %icc, 1f
          nop

	sethi	%hi(sfmmu_panic8), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic8), %o0
1:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0
3:
#endif

	! load HAT sfmmu_ctxs[mmuid] gnum, cnum

	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS

	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 *
	 * Fast path code, do a quick check.
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt  %o1, 8f		! allocflag == 0, skip ctx allocation, bail
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 2f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				!%g4 = ret = 1
	be,a,pt	%icc, 8f			! gnum unchanged, go to done
	  mov	%g6, %o1

2:
	/*
	 * Grab per process (PP) sfmmu_ctx_lock spinlock,
	 * followed by the 'slow path' code.
	 */
	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
3:
	brz	%g3, 5f
	  nop
4:
	brnz,a,pt       %g3, 4b				! spin if lock is 1
	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
	ba	%xcc, 3b				! retry the lock
	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3    ! %g3 = PP lock

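	/*
	 * Illustrative only (not assembled): the PP-lock acquire above
	 * in C form.  ldstub() stands for the atomic load-store-
	 * unsigned-byte instruction, which sets the byte to 0xff and
	 * returns its prior value; the inner spin re-reads the lock
	 * with a plain load to avoid hammering it with atomics.
	 *
	 *	while (ldstub(&sfmmup->sfmmu_ctx_lock) != 0) {
	 *		while (sfmmup->sfmmu_ctx_lock != 0)
	 *			;		// spin if lock is 1
	 *	}
	 *	membar_loadload();	// order loads after lock entry
	 */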
5:
	membar  #LoadLoad
	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 6f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 2f			! gnum unchanged, go to done
	  mov	%g6, %o1

	ba,pt	%icc, 6f
	  nop
2:
	membar  #LoadStore|#StoreStore
	ba,pt %icc, 8f
	  clrb  [%o0 + SFMMU_CTX_LOCK]
6:
	/*
	 * We get here if we do not have a valid context, or
	 * the HAT gnum does not match the global gnum. We hold
	 * the sfmmu_ctx_lock spinlock. Allocate that context.
	 *
	 * %o3 = mmu_ctxp
	 */
	add	%o3, MMU_CTX_CNUM, %g3
	ld	[%o3 + MMU_CTX_NCTXS], %g4

	/*
         * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
         * %g3 = mmu cnum address
	 * %g4 = mmu nctxs
	 *
	 * %o0 = sfmmup
	 * %o1 = mmu current cnum value (used as new cnum)
	 * %o4 = mmu gnum
	 *
	 * %o5 = scratch
	 */
	ld	[%g3], %o1
0:
	cmp	%o1, %g4
	bl,a,pt %icc, 1f
	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1

	/*
	 * cnum reaches max; bail so that wraparound can be performed later.
	 */
	set	INVALID_CONTEXT, %o1
	mov	%g0, %g4		! %g4 = ret = 0

	membar  #LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
1:
	! %g3 = addr of mmu_ctxp->cnum
	! %o5 = mmu_ctxp->cnum + 1
	cas	[%g3], %o1, %o5
	cmp	%o1, %o5
	bne,a,pn %xcc, 0b	! cas failed
	  ld	[%g3], %o1

#ifdef DEBUG
        set	MAX_SFMMU_CTX_VAL, %o5
	cmp	%o1, %o5
	ble,pt %icc, 2f
	  nop

	sethi	%hi(sfmmu_panic9), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic9), %o0
2:
#endif
	! update hat gnum and cnum
	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
	or	%o4, %o1, %o4
	stx	%o4, [%g2 + SFMMU_CTXS]

	membar  #LoadStore|#StoreStore
	clrb	[%o0 + SFMMU_CTX_LOCK]

	mov	1, %g4			! %g4 = ret = 1
8:
	/*
	 * program the secondary context register
	 *
	 * %o1 = cnum
	 * %g1 = sfmmu private/shared flag (0:private,  1:shared)
	 */

	/*
	 * When we come here and the context is invalid, we want to set both
	 * the private and shared ctx regs to INVALID. In order to
	 * do so, we set the sfmmu priv/shared flag to 'private' regardless,
	 * so that the private ctx reg will be set to invalid.
	 * Note that on sun4v values written to the private context register
	 * are automatically written to the corresponding shared context
	 * register as well. On sun4u SET_SECCTX() will invalidate the shared
	 * context register when it sets a private secondary context register.
	 */

	cmp	%o1, INVALID_CONTEXT
	be,a,pn	%icc, 9f
	  clr	%g1
9:

#ifdef	sun4u
	ldub	[%o0 + SFMMU_CEXT], %o2
	sll	%o2, CTXREG_EXT_SHIFT, %o2
	or	%o1, %o2, %o1
#endif /* sun4u */

	SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)

        retl
          mov   %g4, %o0                        ! %o0 = ret

	SET_SIZE(sfmmu_alloc_ctx)

	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
2:
	ldx	[%o1], %g2			/* modified */
	cmp	%g2, %g3			/* is modified = current? */
	be,a,pt	%xcc,1f				/* yes, don't write */
	stx	%g3, [%o0]			/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f			/* cas succeeded - return */
	  nop
	ldx	[%o2], %g3			/* new current */
	stx	%g3, [%o0]			/* save as new original */
	ba,pt	%xcc, 2b
	  mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)
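
	/*
	 * Illustrative only (not assembled): sfmmu_modifytte above in C
	 * form, with casx() as a hypothetical stand-in for the 64-bit
	 * compare-and-swap (it returns the prior contents of *p).
	 *
	 *	uint64_t cur = *(uint64_t *)dttep;
	 *	uint64_t orig = *(uint64_t *)sttep;
	 *
	 *	for (;;) {
	 *		uint64_t mod = *(uint64_t *)stmodttep;
	 *
	 *		if (mod == cur) {		// nothing to write
	 *			*(uint64_t *)sttep = cur;
	 *			break;
	 *		}
	 *		if (casx((uint64_t *)dttep, orig, mod) == orig)
	 *			break;			// cas succeeded
	 *		cur = *(uint64_t *)dttep;	// reread current,
	 *		*(uint64_t *)sttep = cur;	// update original,
	 *		orig = cur;			// and try again
	 *	}
	 *	membar_storeload();
	 */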

	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2			/* modified */
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
	cmp	%g3, %g2			/* is modified = current? */
	be,a,pn %xcc,1f				/* yes, don't write */
	mov	0, %o1				/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1			/* cas failed. */
	move	%xcc, 1, %o1			/* cas succeeded. */
1:
	stx	%g2, [%o0]			/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

#endif /* lint */

/*
 * Other sfmmu primitives
 */


#if defined (lint)
void
sfmmu_patch_ktsb(void)
{
}

void
sfmmu_kpm_patch_tlbm(void)
{
}

void
sfmmu_kpm_patch_tsbm(void)
{
}

void
sfmmu_patch_shctx(void)
{
}

void
sfmmu_patch_pgsz_reg(void)
{
}

/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
{
}

/* ARGSUSED */
void
sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
{
}

/* ARGSUSED */
void
sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
{
}

/* ARGSUSED */
void
sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
{
}

#else /* lint */

#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 32:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as follows:
	 *
	 * sethi   %hh(RUNTIME_PATCH), tmp
	 * sethi   %lm(RUNTIME_PATCH), dest
	 * or      tmp, %hm(RUNTIME_PATCH), tmp
	 * or      dest, %lo(RUNTIME_PATCH), dest
	 * sllx    tmp, 32, tmp
	 * nop
	 * or      tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)
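
	/*
	 * Worked example for sfmmu_fixup_setx: a 64-bit value to be
	 * patched splits as bits [63:42] into the imm22 field of the
	 * first sethi (%hh), bits [41:32] into the imm13 of the first
	 * or (%hm), bits [31:10] into the imm22 of the second sethi
	 * (%lm), and bits [9:0] into the imm13 of the second or (%lo);
	 * the sllx/or pair at run time then reassembles the full
	 * 64-bit constant.
	 */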

	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to setup, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	  srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	  or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	  add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)
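
	/*
	 * Illustrative only (not assembled): the ldda match performed
	 * above, in C form.  A SPARC format-3 memory op has op (bits
	 * 31:30) equal to 3 and its op3 field in bits 24:19; op3 0x13
	 * is ldda, and the imm_asi field occupies bits 12:5 (hence the
	 * "sll %o3, 5" in the caller below).  patchva and imm_asi are
	 * hypothetical names.
	 *
	 *	uint32_t instr = *patchva;
	 *
	 *	if (((instr >> 30) & 0x3) == 0x3 &&	// format-3 op
	 *	    ((instr >> 19) & 0x3f) == 0x13)	// op3 == ldda
	 *		*patchva = instr | imm_asi;	// patch in the asi
	 */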

	/*
	 * Patch immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	mov	ASI_QUAD_LDD_PHYS, %o3		! set QUAD_LDD_PHYS by default

#ifdef sun4v

	/*
	 * Check ktsb_phys. It must be non-zero for sun4v, panic if not.
	 */

	brnz,pt %o0, do_patch
	nop

	sethi	%hi(sfmmu_panic11), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic11), %o0
do_patch:

#else /* sun4v */
	/*
	 * Some non-sun4v platforms deploy virtual ktsb (ktsb_phys==0).
	 * Note that ASI_NQUAD_LD is not defined/used for sun4v
	 */
	movrz	%o0, ASI_NQUAD_LD, %o3

#endif /* sun4v */

	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	  or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	  or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	  or	%o0, %lo(iktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
	  or	%o0, %lo(iktsb4m), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)


	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	  nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(iktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb4m), %o0

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif /* !sun4v */

	ldx 	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx 	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(iktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(iktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)

	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)

	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	flush	%o0
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)

	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	lduw	[%o1], %o3
	subcc	%o4, %o3, %o4
	bz,pt	%icc, 1f
	  /* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)

	ENTRY_NP(sfmmu_patch_shctx)
#ifdef sun4u
	retl
	  nop
#else /* sun4u */
	set	sfmmu_shctx_cpu_mondo_patch, %o0
	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp       %g5
	st	%o1, [%o0]
	flush	%o0
	MAKE_NOP_INSTR(%o1)
	add	%o0, I_SIZE, %o0	! next instr
	st	%o1, [%o0]
	flush	%o0

	set	sfmmu_shctx_user_rtt_patch, %o0
	st      %o1, [%o0]		! nop 1st instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 2nd instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 3rd instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 4th instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 5th instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 6th instruction
	retl
	flush	%o0
#endif /* sun4u */
	SET_SIZE(sfmmu_patch_shctx)

	ENTRY_NP(sfmmu_patch_pgsz_reg)
#ifdef sun4u
	retl
	  nop
#else /* sun4u */
	set	sfmmu_pgsz_load_mmustate_patch, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	retl
	flush	%o0
#endif /* sun4u */
	SET_SIZE(sfmmu_patch_pgsz_reg)

	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, locked_tsb_l8)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_load_tsbe)

	/*
	 * Flush TSB of a given entry if the tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_unload_tsbe)

	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, locked_tsb_l9)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	  membar #StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_load_tsb)

	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg. Locking is required since
	 * kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	  membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_unload_tsb)

#endif /* lint */


#if defined (lint)

/*ARGSUSED*/
pfn_t
sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
{ return(0); }

#else /* lint */

	ENTRY_NP(sfmmu_ttetopfn)
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0
	SET_SIZE(sfmmu_ttetopfn)

#endif /* !lint */

/*
 * These macros are used to update global sfmmu hme hash statistics
 * in perf critical paths. They are only enabled in debug kernels or
 * if SFMMU_STAT_GATHER is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_LINKS, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]


#else /* DEBUG || SFMMU_STAT_GATHER */

#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#endif  /* DEBUG || SFMMU_STAT_GATHER */

/*
 * This macro is used to update global sfmmu kstats in non
 * perf critical areas so it is enabled all the time.
 */
#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
	sethi	%hi(sfmmu_global_stat), tmp1				;\
	add	tmp1, statname, tmp1					;\
	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
	inc	tmp2							;\
	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]

/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time.
 */
#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
	ld	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	st	tmp1, [tsbarea + stat]

/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time.
 */
#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
	lduh	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	stuh	tmp1, [tsbarea + stat]

#if defined(KPM_TLBMISS_STATS_GATHER)
	/*
	 * Count kpm dtlb misses separately to allow a different
	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
	 */
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
	brgez	tagacc, label	/* KPM VA? */				;\
	nop								;\
	CPU_INDEX(tmp1, tsbma)						;\
	sethi	%hi(kpmtsbm_area), tsbma				;\
	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
	/* VA range check */						;\
	ldx	[tsbma + KPMTSBM_VBASE], val				;\
	cmp	tagacc, val						;\
	blu,pn	%xcc, label						;\
	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
	cmp	tagacc, tmp1						;\
	bgeu,pn	%xcc, label						;\
	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
	inc	val							;\
	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
label:
#else
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
#endif	/* KPM_TLBMISS_STATS_GATHER */

#if defined (lint)
/*
 * The following routines are jumped to from the mmu trap handlers to do
 * the setting up to call systrap.  They are separate routines instead of
 * being part of the handlers because the handlers would exceed 32
 * instructions, and since this is part of the slow path the jump
 * cost is irrelevant.
 */
void
sfmmu_pagefault(void)
{
}

void
sfmmu_mmu_trap(void)
{
}

void
sfmmu_window_trap(void)
{
}

void
sfmmu_kpm_exception(void)
{
}

#else /* lint */

#ifdef	PTL1_PANIC_DEBUG
	.seg	".data"
	.global	test_ptl1_panic
test_ptl1_panic:
	.word	0
	.align	8

	.seg	".text"
	.align	4
#endif	/* PTL1_PANIC_DEBUG */


	ENTRY_NP(sfmmu_pagefault)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */

#ifdef  PTL1_PANIC_DEBUG
	/* check if we want to test the tl1 panic */
	sethi	%hi(test_ptl1_panic), %g4
	ld	[%g4 + %lo(test_ptl1_panic)], %g1
	st	%g0, [%g4 + %lo(test_ptl1_panic)]
	cmp	%g1, %g0
	bne,a,pn %icc, ptl1_panic
	  or	%g0, PTL1_BAD_DEBUG, %g1
#endif	/* PTL1_PANIC_DEBUG */
1:
	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
	/*
	 * g2 = tag access reg
	 * g3.l = type
	 * g3.h = 0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1
2:
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	SET_SIZE(sfmmu_pagefault)

	ENTRY_NP(sfmmu_mmu_trap)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1:
	/*
	 * g2 = tag access reg
	 * g3 = type
	 */
	sethi	%hi(sfmmu_tsbmiss_exception), %g1
	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_mmu_trap)

	ENTRY_NP(sfmmu_suspend_tl)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3
	movne	%icc, T_DATA_PROT, %g3
1:
	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
	ba,pt	%xcc, sys_trap
	  mov	PIL_15, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_suspend_tl)
1821
1822	/*
1823	 * No %g registers in use at this point.
1824	 */
1825	ENTRY_NP(sfmmu_window_trap)
1826	rdpr	%tpc, %g1
1827#ifdef sun4v
1828#ifdef DEBUG
1829	/* We assume previous %gl was 1 */
1830	rdpr	%tstate, %g4
1831	srlx	%g4, TSTATE_GL_SHIFT, %g4
1832	and	%g4, TSTATE_GL_MASK, %g4
1833	cmp	%g4, 1
1834	bne,a,pn %icc, ptl1_panic
1835	  mov	PTL1_BAD_WTRAP, %g1
1836#endif /* DEBUG */
1837	/* user miss at tl>1. better be the window handler or user_rtt */
1838	/* in user_rtt? */
1839	set	rtt_fill_start, %g4
1840	cmp	%g1, %g4
1841	blu,pn %xcc, 6f
1842	 .empty
1843	set	rtt_fill_end, %g4
1844	cmp	%g1, %g4
1845	bgeu,pn %xcc, 6f
1846	 nop
1847	set	fault_rtt_fn1, %g1
1848	wrpr	%g0, %g1, %tnpc
1849	ba,a	7f
18506:
1851	! must save this trap level before descending trap stack
1852	! no need to save %tnpc, either overwritten or discarded
1853	! already got it: rdpr	%tpc, %g1
1854	rdpr	%tstate, %g6
1855	rdpr	%tt, %g7
1856	! trap level saved, go get underlying trap type
1857	rdpr	%tl, %g5
1858	sub	%g5, 1, %g3
1859	wrpr	%g3, %tl
1860	rdpr	%tt, %g2
1861	wrpr	%g5, %tl
1862	! restore saved trap level
1863	wrpr	%g1, %tpc
1864	wrpr	%g6, %tstate
1865	wrpr	%g7, %tt
1866#else /* sun4v */
1867	/* user miss at tl>1. better be the window handler */
1868	rdpr	%tl, %g5
1869	sub	%g5, 1, %g3
1870	wrpr	%g3, %tl
1871	rdpr	%tt, %g2
1872	wrpr	%g5, %tl
1873#endif /* sun4v */
1874	and	%g2, WTRAP_TTMASK, %g4
1875	cmp	%g4, WTRAP_TYPE
1876	bne,pn	%xcc, 1f
1877	 nop
1878	/* tpc should be in the trap table */
1879	set	trap_table, %g4
1880	cmp	%g1, %g4
1881	blt,pn %xcc, 1f
1882	 .empty
1883	set	etrap_table, %g4
1884	cmp	%g1, %g4
1885	bge,pn %xcc, 1f
1886	 .empty
1887	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
1888	add	%g1, WTRAP_FAULTOFF, %g1
1889	wrpr	%g0, %g1, %tnpc
18907:
1891	/*
1892	 * some wbuf handlers will call systrap to resolve the fault
1893	 * we pass the trap type so they figure out the correct parameters.
1894	 * g5 = trap type, g6 = tag access reg
1895	 */
1896
1897	/*
1898	 * only use g5, g6, g7 registers after we have switched to alternate
1899	 * globals.
1900	 */
1901	SET_GL_REG(1)
1902	USE_ALTERNATE_GLOBALS(%g5)
1903	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
1904	rdpr	%tt, %g7
1905	cmp	%g7, FAST_IMMU_MISS_TT
1906	be,a,pn	%icc, ptl1_panic
1907	  mov	PTL1_BAD_WTRAP, %g1
1908	cmp	%g7, T_INSTR_MMU_MISS
1909	be,a,pn	%icc, ptl1_panic
1910	  mov	PTL1_BAD_WTRAP, %g1
1911	mov	T_DATA_PROT, %g5
1912	cmp	%g7, FAST_DMMU_MISS_TT
1913	move	%icc, T_DATA_MMU_MISS, %g5
1914	cmp	%g7, T_DATA_MMU_MISS
1915	move	%icc, T_DATA_MMU_MISS, %g5
	! XXXQ AGS: re-check this one
1917	done
19181:
1919	CPU_PADDR(%g1, %g4)
1920	add	%g1, CPU_TL1_HDLR, %g1
1921	lda	[%g1]ASI_MEM, %g4
1922	brnz,a,pt %g4, sfmmu_mmu_trap
1923	  sta	%g0, [%g1]ASI_MEM
1924	ba,pt	%icc, ptl1_panic
1925	  mov	PTL1_BAD_TRAP, %g1
1926	SET_SIZE(sfmmu_window_trap)
1927
1928	ENTRY_NP(sfmmu_kpm_exception)
1929	/*
1930	 * We have accessed an unmapped segkpm address or a legal segkpm
1931	 * address which is involved in a VAC alias conflict prevention.
1932	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
1933	 * set. If it is, we will instead note that a fault has occurred
1934	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
1935	 * a "retry"). This will step over the faulting instruction.
1936	 * Note that this means that a legal segkpm address involved in
1937	 * a VAC alias conflict prevention (a rare case to begin with)
1938	 * cannot be used in DTrace.
1939	 */
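	/*
	 * A minimal sketch of the check below, in C-like pseudocode
	 * (illustrative only; the field names mirror the assym offsets):
	 *
	 *	cpu_core_t *c = &cpu_core[CPU->cpu_id];
	 *	if (c->cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) {
	 *		c->cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *		c->cpuc_dtrace_illval = mmu_fault_addr;
	 *		done;	// steps over the faulting instruction
	 *	}
	 */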
1940	CPU_INDEX(%g1, %g2)
1941	set	cpu_core, %g2
1942	sllx	%g1, CPU_CORE_SHIFT, %g1
1943	add	%g1, %g2, %g1
1944	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
1945	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
1946	bz	0f
1947	or	%g2, CPU_DTRACE_BADADDR, %g2
1948	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
1949	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
1950	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
1951	done
19520:
1953	TSTAT_CHECK_TL1(1f, %g1, %g2)
19541:
1955	SET_GL_REG(1)
1956	USE_ALTERNATE_GLOBALS(%g5)
1957	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
1958	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1959	/*
1960	 * g2=tagacc g3.l=type g3.h=0
1961	 */
1962	sethi	%hi(trap), %g1
1963	or	%g1, %lo(trap), %g1
1964	ba,pt	%xcc, sys_trap
1965	mov	-1, %g4
1966	SET_SIZE(sfmmu_kpm_exception)
1967
1968#endif /* lint */
1969
#if defined(lint)
1971
1972void
1973sfmmu_tsb_miss(void)
1974{
1975}
1976
1977void
1978sfmmu_kpm_dtsb_miss(void)
1979{
1980}
1981
1982void
1983sfmmu_kpm_dtsb_miss_small(void)
1984{
1985}
1986
1987#else /* lint */
1988
1989#if (IMAP_SEG != 0)
1990#error - ism_map->ism_seg offset is not zero
1991#endif
1992
1993/*
 * Checks whether the vaddr passed in via tagacc lies in an ISM segment for
 * this process.  If it does, the ISM mapping for this ctx is copied into
 * "ismseg" and control branches to label "ismhit".  If this is not an ISM
 * process or not an ISM tlb miss, it falls through.
2001 *
2002 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
 * so that any other threads of this process will not try to walk the ism
2004 * maps while they are being changed.
2005 *
2006 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
2007 *       will make sure of that. This means we can terminate our search on
2008 *       the first zero mapping we find.
2009 *
2010 * Parameters:
2011 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
2012 * tsbmiss	= address of tsb miss area (in)
2013 * ismseg	= contents of ism_seg for this ism map (out)
2014 * ismhat	= physical address of imap_ismhat for this ism map (out)
2015 * tmp1		= scratch reg (CLOBBERED)
2016 * tmp2		= scratch reg (CLOBBERED)
2017 * tmp3		= scratch reg (CLOBBERED)
 * label:    temporary labels
 * ismhit:   label to jump to on an ism tlb miss
2021 */
#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
2023	label, ismhit)							\
2024	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
2025	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
2026	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
2027label/**/1:								;\
2028	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
2029	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
2030label/**/2:								;\
2031	brz,pt  ismseg, label/**/3		/* no mapping */	;\
2032	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
2033	lduba	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
2034	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
2035	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
2036	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
2037	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
2038	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
2039	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
	cmp	tmp2, tmp1		/* check (va - vbase) < size */	;\
2041	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
2042	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
2043									;\
2044	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
2045	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
2046	cmp	ismhat, tmp1						;\
2047	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
2048	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
2049									;\
2050	add	tmp3, IBLK_NEXTPA, tmp1					;\
2051	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
2052	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
2053	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
2054label/**/3:
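
/*
 * A rough C-like sketch of the walk above (illustrative only; the names
 * approximate the ism_blk/ism_map layout, and all loads really go through
 * ASI_MEM using physical addresses):
 *
 *	for (blk = tsbmiss->ismblkpa; (intptr_t)blk >= 0;
 *	    blk = blk->iblk_nextpa) {
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			map = &blk->iblk_maps[i];
 *			if (map->ism_seg == 0)
 *				goto done;	// no holes: stop searching
 *			if ((va >> vb_shift) - vbase(map) < size(map))
 *				goto ismhit;	// ismseg/ismhat now valid
 *		}
 *	}
 * done: fall through -- not an ISM miss
 */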
2055
2056/*
 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
 * It also returns the virtual page for vaddr (i.e. vaddr >> hmeshift).
2059 * Parameters:
2060 * tagacc = reg containing virtual address
2061 * hatid = reg containing sfmmu pointer
2062 * hmeshift = constant/register to shift vaddr to obtain vapg
2063 * hmebp = register where bucket pointer will be stored
2064 * vapg = register where virtual page will be stored
 * tsbarea = pointer to the tsbmiss area for this cpu
 * label = temporary label
 * tmp1, tmp2 = tmp registers
2066 */
2067
2068
2069#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
2070	vapg, label, tmp1, tmp2)					\
2071	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
2072	brnz,a,pt tmp1, label/**/1					;\
2073	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
2074	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
2075	ba,pt	%xcc, label/**/2					;\
2076	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
2077label/**/1:								;\
2078	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
2079label/**/2:								;\
2080	srlx	tagacc, hmeshift, vapg					;\
2081	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
2082	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
2083	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
2084	add	hmebp, tmp1, hmebp
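
/*
 * In rough C terms the bucket computation above is (illustrative only;
 * the hash sizes stored in the tsbmiss area act as masks):
 *
 *	mask  = (ctx == 0) ? tsbmiss->khashsz : tsbmiss->uhashsz;
 *	start = (ctx == 0) ? tsbmiss->khashstart : tsbmiss->uhashstart;
 *	vapg  = tagacc >> hmeshift;
 *	hmebp = start + ((vapg ^ (uintptr_t)hatid) & mask) * HMEBUCK_SIZE;
 */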
2085
2086/*
2087 * hashtag includes bspage + hashno (64 bits).
2088 */
2089
2090#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
2091	sllx	vapg, hmeshift, vapg					;\
2092	mov	hashno, hblktag						;\
2093	sllx	hblktag, HTAG_REHASH_SHIFT, hblktag			;\
2094	or	vapg, hblktag, hblktag
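
/*
 * Equivalently (illustrative only):
 *
 *	hblktag = (vapg << hmeshift) | (hashno << HTAG_REHASH_SHIFT);
 *
 * i.e. the base-shifted page address with the rehash number folded into
 * the upper bits.
 */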
2095
2096/*
2097 * Function to traverse hmeblk hash link list and find corresponding match.
2098 * The search is done using physical pointers. It returns the physical address
2099 * pointer to the hmeblk that matches with the tag provided.
2100 * Parameters:
2101 * hmebp	= register that points to hme hash bucket, also used as
2102 *		  tmp reg (clobbered)
2103 * hmeblktag	= register with hmeblk tag match
2104 * hatid	= register with hatid
2105 * hmeblkpa	= register where physical ptr will be stored
 * tsbarea	= pointer to the tsbmiss area for this cpu
 * tmp1		= tmp reg
2107 * label: temporary label
2108 */
2109
2110#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, tsbarea, 	\
2111	tmp1, label)							\
2112	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
2113	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2114	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2115label/**/1:								;\
2116	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2117	be,pn   %xcc, label/**/2					;\
2118	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2119	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
2120	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2121	add	hmebp, CLONGSIZE, hmebp					;\
2122	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
2123	xor	tmp1, hmeblktag, tmp1					;\
2124	xor	hmebp, hatid, hmebp					;\
2125	or	hmebp, tmp1, hmebp					;\
2126	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
2127	  add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
2128	ba,pt	%xcc, label/**/1					;\
2129	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
2130label/**/2:
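
/*
 * The list walk above is, in effect (C-like pseudocode, illustrative
 * only; the xor/xor/or sequence compares both 64-bit tag words with a
 * single branch):
 *
 *	for (pa = hmebp->nextpa; pa != HMEBLK_ENDPA; pa = pa->nextpa) {
 *		if (pa->tag[0] == hmeblktag && pa->tag[1] == hatid)
 *			break;		// hmeblkpa now points at the match
 *	}
 */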
2131
2132/*
2133 * Function to traverse hmeblk hash link list and find corresponding match.
2134 * The search is done using physical pointers. It returns the physical address
2135 * pointer to the hmeblk that matches with the tag
2136 * provided.
2137 * Parameters:
2138 * hmeblktag	= register with hmeblk tag match (rid field is 0)
2139 * hatid	= register with hatid (pointer to SRD)
 * hmeblkpa	= register with physical ptr of the first hmeblk on entry;
 *		  holds the physical ptr of the matching hmeblk on exit
 * tsbarea	= pointer to the tsbmiss area for this cpu
 * tmp1		= tmp reg
2142 * tmp2		= tmp reg
2143 * label: temporary label
2144 */
2145
2146#define	HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, tsbarea,	\
2147	tmp1, tmp2, label)			 			\
2148label/**/1:								;\
2149	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2150	be,pn   %xcc, label/**/4					;\
2151	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			;\
2152	add	hmeblkpa, HMEBLK_TAG, tmp2				;\
2153	ldxa	[tmp2]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2154	add	tmp2, CLONGSIZE, tmp2					;\
2155	ldxa	[tmp2]ASI_MEM, tmp2 	/* read 2nd part of tag */	;\
2156	xor	tmp1, hmeblktag, tmp1					;\
2157	xor	tmp2, hatid, tmp2					;\
2158	brz,pn	tmp2, label/**/3	/* branch on hit */		;\
2159	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2160label/**/2:								;\
2161	ba,pt	%xcc, label/**/1					;\
2162	  ldxa	[tmp2]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */		;\
2163label/**/3:								;\
2164	cmp	tmp1, SFMMU_MAX_HME_REGIONS				;\
2165	bgeu,pt	%xcc, label/**/2					;\
2166	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2167	and	tmp1, BT_ULMASK, tmp2					;\
2168	srlx	tmp1, BT_ULSHIFT, tmp1					;\
2169	sllx	tmp1, CLONGSHIFT, tmp1					;\
2170	add	tsbarea, tmp1, tmp1					;\
2171	ldx	[tmp1 + TSBMISS_SHMERMAP], tmp1				;\
2172	srlx	tmp1, tmp2, tmp1					;\
2173	btst	0x1, tmp1						;\
2174	bz,pn	%xcc, label/**/2					;\
2175	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2176label/**/4:
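
/*
 * The extra step versus HMEHASH_SEARCH is the region check on a tag hit:
 * the rid recovered from the tag must be a valid region id whose bit is
 * set in the tsbmiss shared-region map, otherwise the walk continues
 * (C-like pseudocode, illustrative only):
 *
 *	if (rid >= SFMMU_MAX_HME_REGIONS ||
 *	    !BT_TEST(tsbmiss->shmermap, rid))
 *		continue;	// not one of our regions, keep searching
 */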
2177
2178#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2179#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2180#endif
2181
2182/*
 * HMEBLK_TO_HMENT is a macro that, given an hmeblk and a vaddr, returns
 * the offset of the corresponding hment.
2185 * Parameters:
2186 * In:
2187 *	vaddr = register with virtual address
2188 *	hmeblkpa = physical pointer to hme_blk
2189 * Out:
2190 *	hmentoff = register where hment offset will be stored
2191 *	hmemisc = hblk_misc
2192 * Scratch:
2193 *	tmp1
2194 */
2195#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
2196	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2197	lda	[hmentoff]ASI_MEM, hmemisc 				;\
2198	andcc	hmemisc, HBLK_SZMASK, %g0				;\
2199	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2200	  or	%g0, HMEBLK_HME1, hmentoff				;\
2201	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2202	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2203	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2204	add	tmp1, HMEBLK_HME1, hmentoff				;\
2205label1:
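
/*
 * Equivalent C-like logic (illustrative only): a non-8K hmeblk has a
 * single hment, while an 8K hmeblk holds NHMENTS of them, indexed by the
 * page bits of the address:
 *
 *	if (hblk_misc & HBLK_SZMASK)		// sz != TTE8K
 *		hmentoff = HMEBLK_HME1;
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    ((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) * SFHME_SIZE;
 */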
2206
2207/*
2208 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2209 *
2210 * tagacc	= (pseudo-)tag access register (in)
2211 * hatid	= sfmmu pointer for TSB miss (in)
2212 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2213 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2214 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2215 * hmemisc	= hblk_misc if TTE is found (out), otherwise clobbered
2216 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2217 *		  for this page size.
2218 * hashno	= constant/register hash number
2219 * tmp		= temp value - clobbered
2220 * label	= temporary label for branching within macro.
2221 * foundlabel	= label to jump to when tte is found.
2222 * suspendlabel= label to jump to when tte is suspended.
2223 * exitlabel	= label to jump to when tte is not found.
2224 *
2225 */
2226#define GET_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, hmeshift, \
2227		 hashno, tmp, label, foundlabel, suspendlabel, exitlabel) \
2228									;\
2229	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2230	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2231	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2232		hmeblkpa, label/**/5, hmemisc, tmp)			;\
2233									;\
2234	/*								;\
2235	 * tagacc = tagacc						;\
2236	 * hatid = hatid						;\
2237	 * tsbarea = tsbarea						;\
2238	 * tte   = hmebp (hme bucket pointer)				;\
2239	 * hmeblkpa  = vapg  (virtual page)				;\
2240	 * hmemisc, tmp = scratch					;\
2241	 */								;\
2242	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2243	or	hmemisc, SFMMU_INVALID_SHMERID, hmemisc			;\
2244									;\
2245	/*								;\
2246	 * tagacc = tagacc						;\
2247	 * hatid = hatid						;\
2248	 * tte   = hmebp						;\
2249	 * hmeblkpa  = CLOBBERED					;\
2250	 * hmemisc  = htag_bspage+hashno+invalid_rid			;\
2251	 * tmp  = scratch						;\
2252	 */								;\
2253	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2254	HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, 	 		\
2255		tsbarea, tagacc, label/**/1)				;\
2256	/*								;\
2257	 * tagacc = CLOBBERED						;\
2258	 * tte = CLOBBERED						;\
2259	 * hmeblkpa = hmeblkpa						;\
2260	 * tmp = scratch						;\
2261	 */								;\
2262	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2263	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
2264	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2265	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2266	  nop								;\
2267label/**/4:								;\
2268	/*								;\
2269	 * We have found the hmeblk containing the hment.		;\
2270	 * Now we calculate the corresponding tte.			;\
2271	 *								;\
2272	 * tagacc = tagacc						;\
2273	 * hatid = hatid						;\
2274	 * tte   = clobbered						;\
2275	 * hmeblkpa  = hmeblkpa						;\
2276	 * hmemisc  = hblktag						;\
2277	 * tmp = scratch						;\
2278	 */								;\
2279	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2280		label/**/2)						;\
2281									;\
2282	/*								;\
2283	 * tagacc = tagacc						;\
2284	 * hatid = hmentoff						;\
2285	 * tte   = clobbered						;\
2286	 * hmeblkpa  = hmeblkpa						;\
2287	 * hmemisc  = hblk_misc						;\
2288	 * tmp = scratch						;\
2289	 */								;\
2290									;\
2291	add	hatid, SFHME_TTE, hatid					;\
2292	add	hmeblkpa, hatid, hmeblkpa				;\
2293	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2294	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2295	set	TTE_SUSPEND, hatid					;\
2296	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2297	btst	tte, hatid						;\
2298	bz,pt	%xcc, foundlabel					;\
2299	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2300									;\
2301	/*								;\
2302	 * Mapping is suspended, so goto suspend label.			;\
2303	 */								;\
2304	ba,pt	%xcc, suspendlabel					;\
2305	  nop
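
/*
 * Taken together, GET_TTE amounts to (C-like pseudocode, illustrative
 * only; register juggling and the scratch saves/restores elided):
 *
 *	hmebp = HMEHASH_FUNC_ASM(tagacc, hatid, hmeshift);
 *	hblk  = HMEHASH_SEARCH(hmebp, hashtag, hatid);
 *	if (hblk == NULL)
 *		goto exitlabel;		// caller rehashes at next pg size
 *	tte = hblk->hment[HMEBLK_TO_HMENT(tagacc, hblk)].hme_tte;
 *	if (tte & TTE_SUSPEND)
 *		goto suspendlabel;
 *	goto foundlabel;
 */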
2306
2307/*
2308 * GET_SHME_TTE is similar to GET_TTE() except it searches
 * shared hmeblks via the HMEHASH_SEARCH_SHME() macro.
 * If a valid tte is found, hmemisc is set to the shctx flag: either
 * 0 (not part of an scd) or 1 (part of an scd).
2312 */
2313#define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, 	\
2314		hmeshift, hashno, tmp, label, foundlabel,		\
2315		suspendlabel, exitlabel)				\
2316									;\
2317	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2318	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2319	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2320		hmeblkpa, label/**/5, hmemisc, tmp)			;\
2321									;\
2322	/*								;\
2323	 * tagacc = tagacc						;\
2324	 * hatid = hatid						;\
2325	 * tsbarea = tsbarea						;\
2326	 * tte   = hmebp (hme bucket pointer)				;\
2327	 * hmeblkpa  = vapg  (virtual page)				;\
2328	 * hmemisc, tmp = scratch					;\
2329	 */								;\
2330	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2331									;\
2332	/*								;\
2333	 * tagacc = tagacc						;\
2334	 * hatid = hatid						;\
2335	 * tsbarea = tsbarea						;\
2336	 * tte   = hmebp						;\
2337	 * hmemisc  = htag_bspage + hashno + 0 (for rid)		;\
2338	 * hmeblkpa  = CLOBBERED					;\
2339	 * tmp = scratch						;\
2340	 */								;\
2341	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2342									;\
2343	add     tte, HMEBUCK_NEXTPA, hmeblkpa				;\
2344	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2345	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte)			;\
2346									;\
2347label/**/8:								;\
2348	HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa,			\
2349		tsbarea, tagacc, tte, label/**/1)			;\
2350	/*								;\
2351	 * tagacc = CLOBBERED						;\
2352	 * tte = CLOBBERED						;\
2353	 * hmeblkpa = hmeblkpa						;\
2354	 * tmp = scratch						;\
2355	 */								;\
2356	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2357	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
2358	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2359	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2360	  nop								;\
2361label/**/4:								;\
2362	/*								;\
2363	 * We have found the hmeblk containing the hment.		;\
2364	 * Now we calculate the corresponding tte.			;\
2365	 *								;\
2366	 * tagacc = tagacc						;\
2367	 * hatid = hatid						;\
2368	 * tte   = clobbered						;\
2369	 * hmeblkpa  = hmeblkpa						;\
2370	 * hmemisc  = hblktag						;\
2371	 * tsbarea = tsbmiss area					;\
2372	 * tmp = scratch						;\
2373	 */								;\
2374	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2375		label/**/2)						;\
2376									;\
2377	/*								;\
2378	 * tagacc = tagacc						;\
2379	 * hatid = hmentoff						;\
2380	 * tte = clobbered						;\
2381	 * hmeblkpa  = hmeblkpa						;\
2382	 * hmemisc  = hblk_misc						;\
2383	 * tsbarea = tsbmiss area					;\
2384	 * tmp = scratch						;\
2385	 */								;\
2386									;\
2387	add	hatid, SFHME_TTE, hatid					;\
2388	add	hmeblkpa, hatid, hmeblkpa				;\
2389	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2390	brlz,pt tte, label/**/6						;\
2391	  nop								;\
2392	btst	HBLK_SZMASK, hmemisc					;\
2393	bnz,a,pt %icc, label/**/7					;\
2394	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2395									;\
2396	/*								;\
	 * We found an invalid 8K tte in shme.				;\
	 * It may not belong to shme's region since			;\
2399	 * region size/alignment granularity is 8K but different	;\
2400	 * regions don't share hmeblks. Continue the search.		;\
2401	 */								;\
2402	sub	hmeblkpa, hatid, hmeblkpa				;\
2403	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2404	srlx	tagacc, hmeshift, tte					;\
2405	add	hmeblkpa, HMEBLK_NEXTPA, hmeblkpa			;\
2406	ldxa	[hmeblkpa]ASI_MEM, hmeblkpa				;\
2407	MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc)		;\
2408	ba,a,pt	%xcc, label/**/8					;\
2409label/**/6:								;\
2410	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)		;\
2411	/*                                  				;\
2412	 * hmemisc is set to 1 if this is a shared mapping. It will	;\
2413	 * be cleared by CHECK_SHARED_PGSZ if this pagesize is not	;\
2414	 * allowed, in order to limit the number of entries in the	;\
2415	 * pagesize register.						;\
2416	 */								;\
2417	CHECK_SHARED_PGSZ(tsbarea, tte, hatid, hmemisc, label/**/9)	;\
2418	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2419label/**/7:								;\
2420	set	TTE_SUSPEND, hatid					;\
2421	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2422	btst	tte, hatid						;\
2423	bz,pt	%xcc, foundlabel					;\
2424	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2425									;\
2426	/*								;\
2427	 * Mapping is suspended, so goto suspend label.			;\
2428	 */								;\
2429	ba,pt	%xcc, suspendlabel					;\
2430	  nop
2431
2432	/*
2433	 * KERNEL PROTECTION HANDLER
2434	 *
2435	 * g1 = tsb8k pointer register (clobbered)
2436	 * g2 = tag access register (ro)
2437	 * g3 - g7 = scratch registers
2438	 *
2439	 * Note: This function is patched at runtime for performance reasons.
	 * 	 Any changes here require sfmmu_patch_ktsb to be fixed as well.
2441	 */
2442	ENTRY_NP(sfmmu_kprot_trap)
2443	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2444sfmmu_kprot_patch_ktsb_base:
2445	RUNTIME_PATCH_SETX(%g1, %g6)
2446	/* %g1 = contents of ktsb_base or ktsb_pbase */
2447sfmmu_kprot_patch_ktsb_szcode:
2448	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2449
2450	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2451	! %g1 = First TSB entry pointer, as TSB miss handler expects
2452
2453	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2454sfmmu_kprot_patch_ktsb4m_base:
2455	RUNTIME_PATCH_SETX(%g3, %g6)
2456	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2457sfmmu_kprot_patch_ktsb4m_szcode:
2458	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2459
2460	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2461	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2462
2463        CPU_TSBMISS_AREA(%g6, %g7)
2464        HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2465	ba,pt	%xcc, sfmmu_tsb_miss_tt
2466	  nop
2467
2468	/*
2469	 * USER PROTECTION HANDLER
2470	 *
2471	 * g1 = tsb8k pointer register (ro)
2472	 * g2 = tag access register (ro)
2473	 * g3 = faulting context (clobbered, currently not used)
2474	 * g4 - g7 = scratch registers
2475	 */
2476	ALTENTRY(sfmmu_uprot_trap)
2477#ifdef sun4v
2478	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2479	/* %g1 = first TSB entry ptr now, %g2 preserved */
2480
2481	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2482	brlz,pt %g3, 9f				/* check for 2nd TSB */
2483	  nop
2484
2485	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2486	/* %g3 = second TSB entry ptr now, %g2 preserved */
2487
2488#else /* sun4v */
2489#ifdef UTSB_PHYS
2490	/* g1 = first TSB entry ptr */
2491	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2492	brlz,pt %g3, 9f			/* check for 2nd TSB */
2493	  nop
2494
2495	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2496	/* %g3 = second TSB entry ptr now, %g2 preserved */
2497#else /* UTSB_PHYS */
2498	brgez,pt %g1, 9f		/* check for 2nd TSB */
2499	  mov	-1, %g3			/* set second tsbe ptr to -1 */
2500
2501	mov	%g2, %g7
2502	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2503	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2504	mov	%g1, %g7
2505	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2506#endif /* UTSB_PHYS */
2507#endif /* sun4v */
25089:
2509	CPU_TSBMISS_AREA(%g6, %g7)
2510	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2511	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2512	  nop
2513
2514	/*
2515	 * Kernel 8K page iTLB miss.  We also get here if we took a
2516	 * fast instruction access mmu miss trap while running in
2517	 * invalid context.
2518	 *
2519	 * %g1 = 8K TSB pointer register (not used, clobbered)
2520	 * %g2 = tag access register (used)
2521	 * %g3 = faulting context id (used)
2522	 * %g7 = TSB tag to match (used)
2523	 */
2524	.align	64
2525	ALTENTRY(sfmmu_kitlb_miss)
2526	brnz,pn %g3, tsb_tl0_noctxt
2527	  nop
2528
2529	/* kernel miss */
2530	/* get kernel tsb pointer */
2531	/* we patch the next set of instructions at run time */
2532	/* NOTE: any changes here require sfmmu_patch_ktsb fixed */
2533iktsbbase:
2534	RUNTIME_PATCH_SETX(%g4, %g5)
2535	/* %g4 = contents of ktsb_base or ktsb_pbase */
2536
2537iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2538	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2539	or	%g4, %g1, %g1			! form tsb ptr
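	/*
	 * The shift pair above extracts the TSB index bits from the VA
	 * and leaves them scaled to a byte offset; roughly (illustrative):
	 *
	 *	off = ((va >> TAGACC_SHIFT) & (nentries - 1)) <<
	 *	    TSB_ENTRY_SHIFT;
	 */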
2540	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2541	cmp	%g4, %g7
2542	bne,pn	%xcc, iktsb4mbase		! check 4m ktsb
2543	  srlx    %g2, MMU_PAGESHIFT4M, %g3	! use 4m virt-page as TSB index
2544
2545	andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2546	bz,pn	%icc, exec_fault
2547	  nop
2548	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2549	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2550	retry
2551
2552iktsb4mbase:
2553        RUNTIME_PATCH_SETX(%g4, %g6)
2554        /* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
2555iktsb4m:
2556	sllx    %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2557        srlx    %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2558	add	%g4, %g3, %g3			! %g3 = 4m tsbe ptr
2559	ldda	[%g3]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2560	cmp	%g4, %g7
2561	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2562	  andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2563	bz,pn	%icc, exec_fault
2564	  nop
2565	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2566	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2567	retry
2568
2569	/*
2570	 * Kernel dTLB miss.  We also get here if we took a fast data
2571	 * access mmu miss trap while running in invalid context.
2572	 *
2573	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2574	 *	We select the TSB miss handler to branch to depending on
2575	 *	the virtual address of the access.  In the future it may
2576	 *	be desirable to separate kpm TTEs into their own TSB,
2577	 *	in which case all that needs to be done is to set
2578	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2579	 *	early in the miss if we detect a kpm VA to a new handler.
2580	 *
2581	 * %g1 = 8K TSB pointer register (not used, clobbered)
2582	 * %g2 = tag access register (used)
2583	 * %g3 = faulting context id (used)
2584	 */
2585	.align	64
2586	ALTENTRY(sfmmu_kdtlb_miss)
2587	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2588	  nop
2589
2590	/* Gather some stats for kpm misses in the TLB. */
2591	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2592	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2593
2594	/*
2595	 * Get first TSB offset and look for 8K/64K/512K mapping
2596	 * using the 8K virtual page as the index.
2597	 *
2598	 * We patch the next set of instructions at run time;
2599	 * any changes here require sfmmu_patch_ktsb changes too.
2600	 */
2601dktsbbase:
2602	RUNTIME_PATCH_SETX(%g7, %g6)
2603	/* %g7 = contents of ktsb_base or ktsb_pbase */
2604
2605dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2606	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2607
2608	/*
2609	 * At this point %g1 is our index into the TSB.
2610	 * We just masked off enough bits of the VA depending
2611	 * on our TSB size code.
2612	 */
2613	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2614	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2615	cmp	%g6, %g4			! compare tag
2616	bne,pn	%xcc, dktsb4m_kpmcheck_small
2617	  add	%g7, %g1, %g1			/* form tsb ptr */
2618	TT_TRACE(trace_tsbhit)
2619	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2620	/* trapstat expects tte in %g5 */
2621	retry
2622
2623	/*
2624	 * If kpm is using large pages, the following instruction needs
2625	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2626	 * so that we will probe the 4M TSB regardless of the VA.  In
2627	 * the case kpm is using small pages, we know no large kernel
2628	 * mappings are located above 0x80000000.00000000 so we skip the
2629	 * probe as an optimization.
2630	 */
2631dktsb4m_kpmcheck_small:
2632	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2633	  /* delay slot safe, below */
2634
2635	/*
2636	 * Get second TSB offset and look for 4M mapping
2637	 * using 4M virtual page as the TSB index.
2638	 *
2639	 * Here:
2640	 * %g1 = 8K TSB pointer.  Don't squash it.
2641	 * %g2 = tag access register (we still need it)
2642	 */
2643	srlx	%g2, MMU_PAGESHIFT4M, %g3
2644
2645	/*
2646	 * We patch the next set of instructions at run time;
2647	 * any changes here require sfmmu_patch_ktsb changes too.
2648	 */
2649dktsb4mbase:
2650	RUNTIME_PATCH_SETX(%g7, %g6)
2651	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2652dktsb4m:
2653	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2654	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2655
2656	/*
2657	 * At this point %g3 is our index into the TSB.
2658	 * We just masked off enough bits of the VA depending
2659	 * on our TSB size code.
2660	 */
2661	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2662	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2663	cmp	%g6, %g4			! compare tag
2664
2665dktsb4m_tsbmiss:
2666	bne,pn	%xcc, dktsb4m_kpmcheck
2667	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2668	TT_TRACE(trace_tsbhit)
2669	/* we don't check TTE size here since we assume 4M TSB is separate */
2670	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2671	/* trapstat expects tte in %g5 */
2672	retry
2673
2674	/*
2675	 * So, we failed to find a valid TTE to match the faulting
2676	 * address in either TSB.  There are a few cases that could land
2677	 * us here:
2678	 *
2679	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2680	 *    to sfmmu_tsb_miss_tt to handle the miss.
2681	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2682	 *    4M TSB.  Let segkpm handle it.
2683	 *
2684	 * Note that we shouldn't land here in the case of a kpm VA when
2685	 * kpm_smallpages is active -- we handled that case earlier at
2686	 * dktsb4m_kpmcheck_small.
2687	 *
2688	 * At this point:
2689	 *  g1 = 8K-indexed primary TSB pointer
2690	 *  g2 = tag access register
2691	 *  g3 = 4M-indexed secondary TSB pointer
2692	 */
2693dktsb4m_kpmcheck:
2694	cmp	%g2, %g0
2695	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2696	  nop
2697	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2698	  nop
2699
2700#ifdef sun4v
2701	/*
2702	 * User instruction miss w/ single TSB.
2703	 * The first probe covers 8K, 64K, and 512K page sizes,
2704	 * because 64K and 512K mappings are replicated off 8K
2705	 * pointer.
2706	 *
2707	 * g1 = tsb8k pointer register
2708	 * g2 = tag access register
2709	 * g3 - g6 = scratch registers
2710	 * g7 = TSB tag to match
2711	 */
2712	.align	64
2713	ALTENTRY(sfmmu_uitlb_fastpath)
2714
2715	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2716	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2717	ba,pn	%xcc, sfmmu_tsb_miss_tt
2718	  mov	-1, %g3
2719
2720	/*
2721	 * User data miss w/ single TSB.
2722	 * The first probe covers 8K, 64K, and 512K page sizes,
2723	 * because 64K and 512K mappings are replicated off 8K
2724	 * pointer.
2725	 *
2726	 * g1 = tsb8k pointer register
2727	 * g2 = tag access register
2728	 * g3 - g6 = scratch registers
2729	 * g7 = TSB tag to match
2730	 */
2731	.align 64
2732	ALTENTRY(sfmmu_udtlb_fastpath)
2733
2734	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2735	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2736	ba,pn	%xcc, sfmmu_tsb_miss_tt
2737	  mov	-1, %g3
2738
2739	/*
2740	 * User instruction miss w/ multiple TSBs (sun4v).
2741	 * The first probe covers 8K, 64K, and 512K page sizes,
2742	 * because 64K and 512K mappings are replicated off 8K
2743	 * pointer.  Second probe covers 4M page size only.
2744	 *
2745	 * Just like sfmmu_udtlb_slowpath, except:
2746	 *   o Uses ASI_ITLB_IN
2747	 *   o checks for execute permission
2748	 *   o No ISM prediction.
2749	 *
2750	 * g1 = tsb8k pointer register
2751	 * g2 = tag access register
2752	 * g3 - g6 = scratch registers
2753	 * g7 = TSB tag to match
2754	 */
2755	.align	64
2756	ALTENTRY(sfmmu_uitlb_slowpath)
2757
2758	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2759	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2760	/* g4 - g5 = clobbered here */
2761
2762	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2763	/* g1 = first TSB pointer, g3 = second TSB pointer */
2764	srlx	%g2, TAG_VALO_SHIFT, %g7
2765	PROBE_2ND_ITSB(%g3, %g7)
2766	/* NOT REACHED */
2767
2768#else /* sun4v */
2769
2770	/*
2771	 * User instruction miss w/ multiple TSBs (sun4u).
2772	 * The first probe covers 8K, 64K, and 512K page sizes,
2773	 * because 64K and 512K mappings are replicated off 8K
2774	 * pointer.  Probe of 1st TSB has already been done prior to entry
2775	 * into this routine. For the UTSB_PHYS case we probe up to 3
	 * other valid TSBs in the following order:
2777	 * 1) shared TSB for 4M-256M pages
2778	 * 2) private TSB for 4M-256M pages
2779	 * 3) shared TSB for 8K-512K pages
2780	 *
2781	 * For the non UTSB_PHYS case we probe the 2nd TSB here that backs
2782	 * 4M-256M pages.
2783	 *
2784	 * Just like sfmmu_udtlb_slowpath, except:
2785	 *   o Uses ASI_ITLB_IN
2786	 *   o checks for execute permission
2787	 *   o No ISM prediction.
2788	 *
2789	 * g1 = tsb8k pointer register
2790	 * g2 = tag access register
2791	 * g4 - g6 = scratch registers
2792	 * g7 = TSB tag to match
2793	 */
2794	.align	64
2795	ALTENTRY(sfmmu_uitlb_slowpath)
2796
2797#ifdef UTSB_PHYS
2798
2799       GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2800        brlz,pt %g6, 1f
2801          nop
2802        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2803        PROBE_4TH_ITSB(%g6, %g7, uitlb_4m_scd_probefail)
28041:
2805        GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2806        brlz,pt %g3, 2f
2807          nop
2808        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2809        PROBE_2ND_ITSB(%g3, %g7, uitlb_4m_probefail)
28102:
2811        GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2812        brlz,pt %g6, sfmmu_tsb_miss_tt
2813          nop
2814        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2815        PROBE_3RD_ITSB(%g6, %g7, uitlb_8K_scd_probefail)
2816        ba,pn   %xcc, sfmmu_tsb_miss_tt
2817          nop
2818
2819#else /* UTSB_PHYS */
2820	mov	%g1, %g3	/* save tsb8k reg in %g3 */
2821	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
2822	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2823	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
2824	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
2825	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
2826       /* g1 = first TSB pointer, g3 = second TSB pointer */
2827        srlx    %g2, TAG_VALO_SHIFT, %g7
2828        PROBE_2ND_ITSB(%g3, %g7, isynth)
2829	ba,pn	%xcc, sfmmu_tsb_miss_tt
2830	  nop
2831
2832#endif /* UTSB_PHYS */
2833#endif /* sun4v */
2834
2835#if defined(sun4u) && defined(UTSB_PHYS)
2836
2837        /*
	 * We come here for the ism predict DTLB_MISS case or if
	 * the probe in the first TSB failed.
2840         */
2841
2842        .align 64
2843        ALTENTRY(sfmmu_udtlb_slowpath_noismpred)
2844
2845	/*
2846         * g1 = tsb8k pointer register
2847         * g2 = tag access register
2848         * g4 - %g6 = scratch registers
2849         * g7 = TSB tag to match
2850	 */
2851
2852	/*
2853	 * ISM non-predict probe order
2854         * probe 1ST_TSB (8K index)
2855         * probe 2ND_TSB (4M index)
2856         * probe 4TH_TSB (4M index)
2857         * probe 3RD_TSB (8K index)
2858	 *
2859	 * We already probed first TSB in DTLB_MISS handler.
2860	 */
2861
2862        /*
         * Private 2ND TSB 4M-256M pages
2864         */
2865	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2866	brlz,pt %g3, 1f
2867	  nop
2868        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2869        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2870
2871	/*
	 * Shared Context 4TH TSB 4M-256M pages
2873	 */
28741:
2875	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2876	brlz,pt %g6, 2f
2877	  nop
2878        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2879        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail)
2880
2881        /*
2882         * Shared Context 3RD TSB 8K-512K pages
2883         */
28842:
2885	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2886	brlz,pt %g6, sfmmu_tsb_miss_tt
2887	  nop
2888        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2889        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail)
2890	ba,pn	%xcc, sfmmu_tsb_miss_tt
2891	  nop
2892
2893	.align 64
2894        ALTENTRY(sfmmu_udtlb_slowpath_ismpred)
2895
2896	/*
2897         * g1 = tsb8k pointer register
2898         * g2 = tag access register
2899         * g4 - g6 = scratch registers
2900         * g7 = TSB tag to match
2901	 */
2902
2903	/*
2904	 * ISM predict probe order
2905	 * probe 4TH_TSB (4M index)
2906	 * probe 2ND_TSB (4M index)
2907	 * probe 1ST_TSB (8K index)
2908	 * probe 3RD_TSB (8K index)
	 */
2910	/*
	 * Shared Context 4TH TSB 4M-256M pages
2912	 */
2913	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2914	brlz,pt %g6, 4f
2915	  nop
2916        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2917        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail2)
2918
2919        /*
         * Private 2ND TSB 4M-256M pages
2921         */
29224:
2923	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2924	brlz,pt %g3, 5f
2925	  nop
2926        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2927        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail2)
2928
29295:
2930        PROBE_1ST_DTSB(%g1, %g7, udtlb_8k_first_probefail2)
2931
2932        /*
2933         * Shared Context 3RD TSB 8K-512K pages
2934         */
2935	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2936	brlz,pt %g6, 6f
2937	  nop
2938        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2939        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail2)
29406:
2941	ba,pn	%xcc, sfmmu_tsb_miss_tt /* ISM Predict and ISM non-predict path */
2942	  nop
2943
2944#else /* sun4u && UTSB_PHYS */
2945
2946       .align 64
2947        ALTENTRY(sfmmu_udtlb_slowpath)
2948
2949	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
2950	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
2951	  mov	%g1, %g3
2952
2953udtlb_miss_probefirst:
2954	/*
2955	 * g1 = 8K TSB pointer register
2956	 * g2 = tag access register
2957	 * g3 = (potentially) second TSB entry ptr
2958	 * g6 = ism pred.
2959	 * g7 = vpg_4m
2960	 */
2961#ifdef sun4v
2962	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2963	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2964
2965	/*
2966	 * Here:
2967	 *   g1 = first TSB pointer
2968	 *   g2 = tag access reg
2969	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2970	 */
2971	brgz,pn	%g6, sfmmu_tsb_miss_tt
2972	  nop
2973#else /* sun4v */
2974	mov	%g1, %g4
2975	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
2976	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2977
2978	/*
2979	 * Here:
2980	 *   g1 = first TSB pointer
2981	 *   g2 = tag access reg
2982	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2983	 */
2984	brgz,pn	%g6, sfmmu_tsb_miss_tt
2985	  nop
2986	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
2987	/* fall through in 8K->4M probe order */
2988#endif /* sun4v */
2989
2990udtlb_miss_probesecond:
2991	/*
2992	 * Look in the second TSB for the TTE
2993	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
2994	 * g2 = tag access reg
2995	 * g3 = 8K TSB pointer register
2996	 * g6 = ism pred.
2997	 * g7 = vpg_4m
2998	 */
2999#ifdef sun4v
3000	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
3001	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3002	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
3003#else /* sun4v */
3004	mov	%g3, %g7
3005	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
3006	/* %g2 clobbered, %g3 =second tsbe ptr */
3007	mov	MMU_TAG_ACCESS, %g2
3008	ldxa	[%g2]ASI_DMMU, %g2
3009#endif /* sun4v */
3010
3011	srlx	%g2, TAG_VALO_SHIFT, %g7
3012	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
3013	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
3014	brgz,pn	%g6, udtlb_miss_probefirst
3015	  nop
3016
3017	/* fall through to sfmmu_tsb_miss_tt */
3018#endif /* sun4u && UTSB_PHYS */
3019
3020
3021	ALTENTRY(sfmmu_tsb_miss_tt)
3022	TT_TRACE(trace_tsbmiss)
3023	/*
3024	 * We get here if there is a TSB miss OR a write protect trap.
3025	 *
3026	 * g1 = First TSB entry pointer
3027	 * g2 = tag access register
3028	 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
3029	 * g4 - g7 = scratch registers
3030	 */
3031
3032	ALTENTRY(sfmmu_tsb_miss)
3033
3034	/*
3035	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3036	 * point to trapstat's TSB miss return code (note that trapstat
3037	 * itself will patch the correct offset to add).
3038	 */
3039	rdpr	%tl, %g7
3040	cmp	%g7, 1
3041	ble,pt	%xcc, 0f
3042	  sethi	%hi(KERNELBASE), %g6
3043	rdpr	%tpc, %g7
3044	or	%g6, %lo(KERNELBASE), %g6
3045	cmp	%g7, %g6
3046	bgeu,pt	%xcc, 0f
3047	/* delay slot safe */
3048
3049	ALTENTRY(tsbmiss_trapstat_patch_point)
3050	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3051	wrpr	%g7, %tpc
3052	add	%g7, 4, %g7
3053	wrpr	%g7, %tnpc
30540:
3055	CPU_TSBMISS_AREA(%g6, %g7)
3056	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save 1ST tsb pointer */
3057	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save 2ND tsb pointer */
3058
3059	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
3060	brz,a,pn %g3, 1f			/* skip ahead if kernel */
3061	  ldn	[%g6 + TSBMISS_KHATID], %g7
3062	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
3063	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
3064
3065	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
3066
3067	cmp	%g3, INVALID_CONTEXT
3068	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
3069	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
3070
3071#if defined(sun4v) || defined(UTSB_PHYS)
3072        ldub    [%g6 + TSBMISS_URTTEFLAGS], %g7	/* clear ctx1 flag set from */
3073        andn    %g7, HAT_CHKCTX1_FLAG, %g7	/* the previous tsb miss    */
3074        stub    %g7, [%g6 + TSBMISS_URTTEFLAGS]
3075#endif /* sun4v || UTSB_PHYS */
3076
3077	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
3078	/*
3079	 * The miss wasn't in an ISM segment.
3080	 *
	 * %g1, %g3, %g4, %g5, %g7 all clobbered
3082	 * %g2 = (pseudo) tag access
3083	 */
3084
3085	ba,pt	%icc, 2f
3086	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
3087
30881:
3089	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
3090	/*
3091	 * 8K and 64K hash.
3092	 */
30932:
3094
3095	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3096		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_l8K, tsb_checktte,
3097		sfmmu_suspend_tl, tsb_512K)
3098	/* NOT REACHED */
3099
3100tsb_512K:
3101	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3102	brz,pn	%g5, 3f
3103	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3104	and	%g4, HAT_512K_FLAG, %g5
3105
3106	/*
3107	 * Note that there is a small window here where we may have
3108	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
3109	 * flag yet, so we will skip searching the 512k hash list.
3110	 * In this case we will end up in pagefault which will find
3111	 * the mapping and return.  So, in this instance we will end up
3112	 * spending a bit more time resolving this TSB miss, but it can
3113	 * only happen once per process and even then, the chances of that
3114	 * are very small, so it's not worth the extra overhead it would
3115	 * take to close this window.
3116	 */
3117	brz,pn	%g5, tsb_4M
3118	  nop
31193:
3120	/*
3121	 * 512K hash
3122	 */
3123
3124	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3125		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_l512K, tsb_checktte,
3126		sfmmu_suspend_tl, tsb_4M)
3127	/* NOT REACHED */
3128
3129tsb_4M:
3130	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3131	brz,pn	%g5, 4f
3132	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3133	and	%g4, HAT_4M_FLAG, %g5
3134	brz,pn	%g5, tsb_32M
3135	  nop
31364:
3137	/*
3138	 * 4M hash
3139	 */
3140
3141	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3142		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_l4M, tsb_checktte,
3143		sfmmu_suspend_tl, tsb_32M)
3144	/* NOT REACHED */
3145
3146tsb_32M:
3147	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3148#ifdef	sun4v
3149        brz,pn	%g5, 6f
3150#else
3151	brz,pn  %g5, tsb_pagefault
3152#endif
3153	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3154	and	%g4, HAT_32M_FLAG, %g5
3155	brz,pn	%g5, tsb_256M
3156	  nop
31575:
3158	/*
3159	 * 32M hash
3160	 */
3161
3162	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3163		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_l32M, tsb_checktte,
3164		sfmmu_suspend_tl, tsb_256M)
3165	/* NOT REACHED */
3166
3167#if defined(sun4u) && !defined(UTSB_PHYS)
3168#define tsb_shme        tsb_pagefault
3169#endif
3170tsb_256M:
3171	ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3172	and	%g4, HAT_256M_FLAG, %g5
3173	brz,pn	%g5, tsb_shme
3174	  nop
31756:
3176	/*
3177	 * 256M hash
3178	 */
3179
3180	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3181	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_l256M, tsb_checktte,
3182	    sfmmu_suspend_tl, tsb_shme)
3183	/* NOT REACHED */
3184
3185tsb_checktte:
3186	/*
3187	 * g1 = hblk_misc
3188	 * g2 = tagacc
3189	 * g3 = tte
3190	 * g4 = tte pa
3191	 * g6 = tsbmiss area
3192	 * g7 = hatid
3193	 */
3194	brlz,a,pt %g3, tsb_validtte
3195	  rdpr	%tt, %g7
3196
3197#if defined(sun4u) && !defined(UTSB_PHYS)
3198#undef tsb_shme
3199	ba      tsb_pagefault
3200	  nop
3201#else /* sun4u && !UTSB_PHYS */
3202
3203tsb_shme:
3204	/*
3205	 * g2 = tagacc
3206	 * g6 = tsbmiss area
3207	 */
3208	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3209	brz,pn	%g5, tsb_pagefault
3210	  nop
3211	ldx	[%g6 + TSBMISS_SHARED_UHATID], %g7	/* g7 = srdp */
3212	brz,pn	%g7, tsb_pagefault
3213	  nop
3214
3215	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3216		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_shme_l8K, tsb_shme_checktte,
3217		sfmmu_suspend_tl, tsb_shme_512K)
3218	/* NOT REACHED */
3219
3220tsb_shme_512K:
3221	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3222	and	%g4, HAT_512K_FLAG, %g5
3223	brz,pn	%g5, tsb_shme_4M
3224	  nop
3225
3226	/*
3227	 * 512K hash
3228	 */
3229
3230	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3231		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_shme_l512K, tsb_shme_checktte,
3232		sfmmu_suspend_tl, tsb_shme_4M)
3233	/* NOT REACHED */
3234
3235tsb_shme_4M:
3236	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3237	and	%g4, HAT_4M_FLAG, %g5
3238	brz,pn	%g5, tsb_shme_32M
3239	  nop
32404:
3241	/*
3242	 * 4M hash
3243	 */
3244	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3245		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_shme_l4M, tsb_shme_checktte,
3246		sfmmu_suspend_tl, tsb_shme_32M)
3247	/* NOT REACHED */
3248
3249tsb_shme_32M:
3250	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3251	and	%g4, HAT_32M_FLAG, %g5
3252	brz,pn	%g5, tsb_shme_256M
3253	  nop
3254
3255	/*
3256	 * 32M hash
3257	 */
3258
3259	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3260		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_shme_l32M, tsb_shme_checktte,
3261		sfmmu_suspend_tl, tsb_shme_256M)
3262	/* NOT REACHED */
3263
3264tsb_shme_256M:
3265	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3266	and	%g4, HAT_256M_FLAG, %g5
3267	brz,pn	%g5, tsb_pagefault
3268	  nop
3269
3270	/*
3271	 * 256M hash
3272	 */
3273
3274	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3275	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_shme_l256M, tsb_shme_checktte,
3276	    sfmmu_suspend_tl, tsb_pagefault)
3277	/* NOT REACHED */
3278
3279tsb_shme_checktte:
3280
3281	brgez,pn %g3, tsb_pagefault
3282	  rdpr	%tt, %g7
3283	/*
3284	 * g1 = ctx1 flag
3285	 * g3 = tte
3286	 * g4 = tte pa
3287	 * g6 = tsbmiss area
3288	 * g7 = tt
3289	 */
3290
3291	brz,pt  %g1, tsb_validtte
3292	  nop
3293	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
	or	%g1, HAT_CHKCTX1_FLAG, %g1
3295	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
3296
3297	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
3298	ba	tsb_validtte
3299#endif /* sun4u && !UTSB_PHYS */
3300
3301tsb_ism_validtte:
3302#ifdef sun4v
3303	/*
3304	 * Check pagesize against bitmap for Rock page size register,
3305	 * for ism mappings.
3306	 *
3307	 * %g1, %g2 = scratch
3308	 * %g3 = tte
	 * %g4 = tte pa
	 * %g5 = tte va
	 * %g6 = tsbmiss area
3312	 * %g7 = tt
3313	 */
3314	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
3315	and     %g1, HAT_CHKCTX1_FLAG, %g2
3316	/*
	 * Clear the HAT_CHKCTX1_FLAG in %g2 if this shared pagesize is not
	 * allowed, in order to limit the number of entries in the pagesize
	 * search register.
3319	 */
3320	CHECK_SHARED_PGSZ(%g6, %g3, %g7, %g2, ism_chk_pgsz)
3321	andn	%g1, HAT_CHKCTX1_FLAG, %g1
3322	or      %g1, %g2, %g1
3323	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
3324	brz     %g2, tsb_validtte
3325	  rdpr  %tt, %g7
3326	SAVE_CTX1(%g7, %g1, %g2, tsb_shctxl)
3327#endif /* sun4v */
3328
3329tsb_validtte:
3330	/*
3331	 * g3 = tte
3332	 * g4 = tte pa
3333	 * g6 = tsbmiss area
3334	 * g7 = tt
3335	 */
3336
3337	/*
3338	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
3339	 */
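	/*
	 * That is (C-like pseudocode, illustrative only):
	 *
	 *	if (tt == FAST_PROT_TT) {	// protection trap
	 *		atomically set the ref and mod bits in the tte,
	 *		or goto tsb_protfault if the tte changed under us;
	 *	}
	 */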
3340	cmp	%g7, FAST_PROT_TT
3341	bne,pt	%icc, 4f
3342	  nop
3343
3344	TTE_SET_REFMOD_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_refmod,
3345	    tsb_protfault)
3346
3347	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3348#ifdef sun4v
3349	MMU_FAULT_STATUS_AREA(%g7)
3350	ldx	[%g7 + MMFSA_D_ADDR], %g5	/* load fault addr for later */
3351#else /* sun4v */
3352	mov     MMU_TAG_ACCESS, %g5
3353	ldxa    [%g5]ASI_DMMU, %g5
3354#endif /* sun4v */
3355	ba,pt	%xcc, tsb_update_tl1
3356	  nop
33574:
3358	/*
3359	 * ITLB translation was found but execute permission is
3360	 * disabled. If we have software execute permission (soft exec
3361	 * bit is set), then enable hardware execute permission.
3362	 * Otherwise continue with a protection violation.
3363	 */
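	/*
	 * I.e. (C-like pseudocode, illustrative only):
	 *
	 *	if (itlb_miss && !(tte & TTE_EXECPRM_INT)) {
	 *		if (soft-exec bit set in tte)
	 *			set TTE_EXECPRM_INT in the tte;
	 *		else
	 *			goto tsb_protfault;
	 *	}
	 */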
3364	cmp     %g7, T_INSTR_MMU_MISS
3365	be,pn	%icc, 5f
3366	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
3367	cmp     %g7, FAST_IMMU_MISS_TT
3368	bne,pt %icc, 3f
3369	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
33705:
3371	bnz,pn %icc, 3f
3372	  TTE_CHK_SOFTEXEC_ML(%g3)		/* check soft execute */
3373	bz,pn %icc, tsb_protfault
3374	  nop
3375	TTE_SET_EXEC_ML(%g3, %g4, %g7, tsb_lset_exec)
33763:
3377	/*
3378	 * Set reference bit if not already set
3379	 */
3380	TTE_SET_REF_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_ref)
3381
3382	/*
3383	 * Now, load into TSB/TLB.  At this point:
3384	 * g3 = tte
3385	 * g4 = patte
3386	 * g6 = tsbmiss area
3387	 */
3388	rdpr	%tt, %g7
3389#ifdef sun4v
3390	MMU_FAULT_STATUS_AREA(%g2)
3391	cmp	%g7, T_INSTR_MMU_MISS
3392	be,a,pt	%icc, 9f
3393	  nop
3394	cmp	%g7, FAST_IMMU_MISS_TT
3395	be,a,pt	%icc, 9f
3396	  nop
3397	add	%g2, MMFSA_D_, %g2
33989:
3399	ldx	[%g2 + MMFSA_CTX_], %g7
3400	sllx	%g7, TTARGET_CTX_SHIFT, %g7
3401	ldx	[%g2 + MMFSA_ADDR_], %g2
3402	mov	%g2, %g5		! load the fault addr for later use
3403	srlx	%g2, TTARGET_VA_SHIFT, %g2
3404	or	%g2, %g7, %g2
3405#else /* sun4v */
3406	mov     MMU_TAG_ACCESS, %g5
3407	cmp     %g7, FAST_IMMU_MISS_TT
3408	be,a,pt %icc, 9f
3409	   ldxa  [%g0]ASI_IMMU, %g2
3410	ldxa    [%g0]ASI_DMMU, %g2
3411	ba,pt   %icc, tsb_update_tl1
3412	   ldxa  [%g5]ASI_DMMU, %g5
34139:
3414	ldxa    [%g5]ASI_IMMU, %g5
3415#endif /* sun4v */
3416
3417tsb_update_tl1:
3418	TTE_CLR_SOFTEXEC_ML(%g3)
3419	srlx	%g2, TTARGET_CTX_SHIFT, %g7
3420	brz,pn	%g7, tsb_kernel
3421#ifdef sun4v
3422	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
3423#else  /* sun4v */
3424	  srlx	%g3, TTE_SZ_SHFT, %g7
3425#endif /* sun4v */
3426
3427tsb_user:
3428#ifdef sun4v
3429	cmp	%g7, TTE4M
3430	bge,pn	%icc, tsb_user4m
3431	  nop
3432#else /* sun4v */
3433	cmp	%g7, TTESZ_VALID | TTE4M
3434	be,pn	%icc, tsb_user4m
3435	  srlx	%g3, TTE_SZ2_SHFT, %g7
3436	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
3437#ifdef ITLB_32M_256M_SUPPORT
3438	bnz,pn	%icc, tsb_user4m
3439	  nop
3440#else /* ITLB_32M_256M_SUPPORT */
3441	bnz,a,pn %icc, tsb_user_pn_synth
3442	 nop
3443#endif /* ITLB_32M_256M_SUPPORT */
3444#endif /* sun4v */
3445
3446tsb_user8k:
3447#if defined(sun4v) || defined(UTSB_PHYS)
3448	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3449	and	%g7, HAT_CHKCTX1_FLAG, %g1
3450	brz,a,pn %g1, 1f
3451	  ldn	[%g6 + TSBMISS_TSBPTR], %g1		! g1 = 1ST TSB ptr
3452	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
3453	brlz,a,pn %g1, ptl1_panic			! if no shared 3RD tsb
3454	  mov PTL1_NO_SCDTSB8K, %g1			! panic
3455        GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
34561:
3457#else /* defined(sun4v) || defined(UTSB_PHYS) */
3458	ldn   [%g6 + TSBMISS_TSBPTR], %g1             ! g1 = 1ST TSB ptr
3459#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3460
3461#ifndef UTSB_PHYS
3462	mov	ASI_N, %g7	! user TSBs accessed by VA
3463	mov	%g7, %asi
3464#endif /* !UTSB_PHYS */
3465
3466	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l3)
3467
3468	rdpr    %tt, %g5
3469#ifdef sun4v
3470	cmp	%g5, T_INSTR_MMU_MISS
3471	be,a,pn	%xcc, 9f
3472	  mov	%g3, %g5
3473#endif /* sun4v */
3474	cmp	%g5, FAST_IMMU_MISS_TT
3475	be,pn	%xcc, 9f
3476	  mov	%g3, %g5
3477
3478	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3479	! trapstat wants TTE in %g5
3480	retry
34819:
3482	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3483	! trapstat wants TTE in %g5
3484	retry
3485
3486tsb_user4m:
3487#if defined(sun4v) || defined(UTSB_PHYS)
3488	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3489	and	%g7, HAT_CHKCTX1_FLAG, %g1
3490	brz,a,pn %g1, 4f
3491	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = 2ND TSB ptr
3492	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)! g1 = 4TH TSB ptr
3493	brlz,a,pn %g1, 5f				! if no shared 4TH TSB
3494	  nop
3495        GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
3496
3497#else /* defined(sun4v) || defined(UTSB_PHYS) */
3498	ldn   [%g6 + TSBMISS_TSBPTR4M], %g1             ! g1 = 2ND TSB ptr
3499#endif /* defined(sun4v) || defined(UTSB_PHYS) */
35004:
3501	brlz,pn %g1, 5f	/* Check to see if we have 2nd TSB programmed */
3502	  nop
3503
3504#ifndef UTSB_PHYS
3505	mov	ASI_N, %g7	! user TSBs accessed by VA
3506	mov	%g7, %asi
3507#endif /* UTSB_PHYS */
3508
3509        TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l4)
3510
35115:
3512	rdpr    %tt, %g5
3513#ifdef sun4v
3514        cmp     %g5, T_INSTR_MMU_MISS
3515        be,a,pn %xcc, 9f
3516          mov   %g3, %g5
3517#endif /* sun4v */
3518        cmp     %g5, FAST_IMMU_MISS_TT
3519        be,pn   %xcc, 9f
3520        mov     %g3, %g5
3521
3522        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3523        ! trapstat wants TTE in %g5
3524        retry
35259:
3526        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3527        ! trapstat wants TTE in %g5
3528        retry
3529
3530#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3531	/*
3532	 * Panther ITLB synthesis.
3533	 * The Panther 32M and 256M ITLB code simulates these two large page
3534	 * sizes with 4M pages, to provide support for programs, for example
3535	 * Java, that may copy instructions into a 32M or 256M data page and
3536	 * then execute them. The code below generates the 4M pfn bits and
3537	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3538	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3539	 * are ignored by the hardware.
3540	 *
3541	 * Now, load into TSB/TLB.  At this point:
3542	 * g2 = tagtarget
3543	 * g3 = tte
3544	 * g4 = patte
3545	 * g5 = tt
3546	 * g6 = tsbmiss area
3547	 */
3548tsb_user_pn_synth:
3549	rdpr %tt, %g5
3550	cmp    %g5, FAST_IMMU_MISS_TT
3551	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
3552	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
3553	bz,pn %icc, 4b				/* if not, been here before */
3554	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
3555	brlz,a,pn %g1, 5f			/* no 2nd tsb */
3556	  mov	%g3, %g5
3557
3558	mov	MMU_TAG_ACCESS, %g7
3559	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
3560	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
3561
3562	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3563	mov	%g7, %asi
3564	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l5) /* update TSB */
35655:
3566        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3567        retry
3568
3569tsb_user_itlb_synth:
3570	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 =  2ND TSB */
3571
3572	mov	MMU_TAG_ACCESS, %g7
3573	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
3574	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
3575	brlz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
3576	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
3577
3578	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3579	mov	%g7, %asi
3580	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l6) /* update TSB */
35817:
3582	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
3583        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3584        retry
#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
3586
3587tsb_kernel:
3588	rdpr	%tt, %g5
3589#ifdef sun4v
3590	cmp	%g7, TTE4M
3591	bge,pn	%icc, 5f
3592#else
3593	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
3594	be,pn	%icc, 5f
3595#endif /* sun4v */
3596	  nop
3597	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8K TSB ptr
3598	ba,pt	%xcc, 6f
3599	  nop
36005:
3601	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4M TSB ptr
3602	brlz,pn	%g1, 3f		/* skip programming if 4M TSB ptr is -1 */
3603	  nop
36046:
3605#ifndef sun4v
3606tsb_kernel_patch_asi:
3607	or	%g0, RUNTIME_PATCH, %g6
3608	mov	%g6, %asi	! XXX avoid writing to %asi !!
3609#endif
3610	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l7)
36113:
3612#ifdef sun4v
3613	cmp	%g5, T_INSTR_MMU_MISS
3614	be,a,pn	%icc, 1f
3615	  mov	%g3, %g5			! trapstat wants TTE in %g5
3616#endif /* sun4v */
3617	cmp	%g5, FAST_IMMU_MISS_TT
3618	be,pn	%icc, 1f
3619	  mov	%g3, %g5			! trapstat wants TTE in %g5
3620	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3621	! trapstat wants TTE in %g5
3622	retry
36231:
3624	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3625	! trapstat wants TTE in %g5
3626	retry
3627
3628tsb_ism:
3629	/*
3630	 * This is an ISM [i|d]tlb miss.  We optimize for largest
3631	 * page size down to smallest.
3632	 *
3633	 * g2 = vaddr + ctx(or ctxtype (sun4v)) aka (pseudo-)tag access
3634	 *	register
3635	 * g3 = ismmap->ism_seg
3636	 * g4 = physical address of ismmap->ism_sfmmu
3637	 * g6 = tsbmiss area
3638	 */
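	/*
	 * The address arithmetic below is roughly (illustrative C only;
	 * vbshift stands for imap_vb_shift):
	 *
	 *	vbase  = (ism_seg >> vbshift) << vbshift;  clears size field
	 *	tagacc = (vaddr - vbase) | ctx;            offset in ISM seg
	 */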
3639	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
3640	brz,a,pn %g7, ptl1_panic		/* if zero, panic */
3641	  mov	PTL1_BAD_ISM, %g1
3642						/* g5 = pa of imap_vb_shift */
3643	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3644	lduba	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
3645	srlx	%g3, %g4, %g3			/* clr size field */
3646	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
3647	sllx    %g3, %g4, %g3                   /* g3 = ism vbase */
3648	and     %g2, %g1, %g4                   /* g4 = ctx number */
3649	andn    %g2, %g1, %g1                   /* g1 = tlb miss vaddr */
3650	sub     %g1, %g3, %g2                   /* g2 = offset in ISM seg */
3651	or      %g2, %g4, %g2                   /* g2 = (pseudo-)tagacc */
3652	sub     %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5 /* g5 = pa of imap_hatflags */
3653	lduha   [%g5]ASI_MEM, %g4               /* g4 = imap_hatflags */
3654#if defined(sun4v) || defined(UTSB_PHYS)
3655	and     %g4, HAT_CTX1_FLAG, %g5         /* isolate HAT_CTX1_FLAG */
3656	brz,pt %g5, tsb_chk4M_ism
3657	  nop
3658	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g5
3659	or      %g5, HAT_CHKCTX1_FLAG, %g5
3660	stub    %g5, [%g6 + TSBMISS_URTTEFLAGS]
3661#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3662	/*
3663	 * ISM pages are always locked down.
3664	 * If we can't find the tte then pagefault
3665	 * and let the spt segment driver resolve it.
3666	 *
3667	 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
3668	 * g4 = imap_hatflags
3669	 * g6 = tsb miss area
3670	 * g7 = ISM hatid
3671	 */
3672
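	/*
	 * The probe order implemented below, in rough C terms (sketch
	 * only):
	 *
	 *	if (flags & HAT_4M_FLAG)
	 *		try 4M, then 8K/64K;
	 *	else if (flags & HAT_32M_FLAG)
	 *		try 32M, then 4M, then 8K/64K;
	 *	else if (flags & HAT_256M_FLAG)
	 *		try 256M, then 4M, then 8K/64K;
	 *	else
	 *		ptl1_panic(PTL1_BAD_ISM);
	 */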
3673tsb_chk4M_ism:
3674	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
3675	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3676	  nop
3677
3678tsb_ism_32M:
3679	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3680	brz,pn	%g5, tsb_ism_256M
3681	  nop
3682
3683	/*
3684	 * 32M hash.
3685	 */
3686
3687	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT32M,
3688	    TTE32M, %g5, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3689	    tsb_ism_4M)
3690	/* NOT REACHED */
3691
3692tsb_ism_32M_found:
3693	brlz,a,pt %g3, tsb_ism_validtte
3694	  rdpr	%tt, %g7
3695	ba,pt	%xcc, tsb_ism_4M
3696	  nop
3697
3698tsb_ism_256M:
3699	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3700	brz,a,pn %g5, ptl1_panic
3701	  mov	PTL1_BAD_ISM, %g1
3702
3703	/*
3704	 * 256M hash.
3705	 */
3706	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT256M,
3707	    TTE256M, %g5, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3708	    tsb_ism_4M)
3709
3710tsb_ism_256M_found:
3711	brlz,a,pt %g3, tsb_ism_validtte
3712	  rdpr	%tt, %g7
3713
3714tsb_ism_4M:
3715	/*
3716	 * 4M hash.
3717	 */
3718	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT4M,
3719	    TTE4M, %g5, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3720	    tsb_ism_8K)
3721	/* NOT REACHED */
3722
3723tsb_ism_4M_found:
3724	brlz,a,pt %g3, tsb_ism_validtte
3725	  rdpr	%tt, %g7
3726
3727tsb_ism_8K:
3728	/*
3729	 * 8K and 64K hash.
3730	 */
3731
3732	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT64K,
3733	    TTE64K, %g5, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3734	    tsb_pagefault)
3735	/* NOT REACHED */
3736
3737tsb_ism_8K_found:
3738	brlz,a,pt %g3, tsb_ism_validtte
3739	  rdpr	%tt, %g7
3740
3741tsb_pagefault:
3742	rdpr	%tt, %g7
3743	cmp	%g7, FAST_PROT_TT
3744	be,a,pn	%icc, tsb_protfault
3745	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3746
3747tsb_protfault:
3748	/*
3749	 * We get here if we couldn't find a valid tte in the hash.
3750	 *
3751	 * If user and we are at tl>1, we go to the window handling code.
3752	 *
3753	 * If kernel and the fault is on the same page as our stack
3754	 * pointer, then we know the stack is bad and the trap handler
3755	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3756	 *
3757	 * If this is a kernel trap and tl>1, panic.
3758	 *
3759	 * Otherwise we call pagefault.
3760	 */
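	/*
	 * In rough C terms (sketch only):
	 *
	 *	if (kernel fault) {
	 *		if (fault is on the same page as %sp)
	 *			ptl1_panic(PTL1_BAD_STACK);
	 *		if (tl > 1)
	 *			ptl1_panic(PTL1_BAD_KPROT_FAULT or
	 *			    PTL1_BAD_KMISS);
	 *		sfmmu_pagefault();	(modulo the DTrace check below)
	 *	} else {
	 *		if (tl > 1)
	 *			sfmmu_window_trap();
	 *		sfmmu_mmu_trap();	(modulo the DTrace check below)
	 *	}
	 */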
3761	cmp	%g7, FAST_IMMU_MISS_TT
3762#ifdef sun4v
3763	MMU_FAULT_STATUS_AREA(%g4)
3764	ldx	[%g4 + MMFSA_I_CTX], %g5
3765	ldx	[%g4 + MMFSA_D_CTX], %g4
3766	move	%icc, %g5, %g4
3767	cmp	%g7, T_INSTR_MMU_MISS
3768	move	%icc, %g5, %g4
3769#else
3770	mov	MMU_TAG_ACCESS, %g4
3771	ldxa	[%g4]ASI_DMMU, %g2
3772	ldxa	[%g4]ASI_IMMU, %g5
3773	move	%icc, %g5, %g2
3774	cmp	%g7, T_INSTR_MMU_MISS
3775	move	%icc, %g5, %g2
3776	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
3777#endif /* sun4v */
3778	brnz,pn	%g4, 3f				/* skip if not kernel */
3779	  rdpr	%tl, %g5
3780
3781	add	%sp, STACK_BIAS, %g3
3782	srlx	%g3, MMU_PAGESHIFT, %g3
3783	srlx	%g2, MMU_PAGESHIFT, %g4
3784	cmp	%g3, %g4
3785	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
3786	  mov	PTL1_BAD_STACK, %g1
3787
3788	cmp	%g5, 1
3789	ble,pt	%icc, 2f
3790	  nop
3791	TSTAT_CHECK_TL1(2f, %g1, %g2)
3792	rdpr	%tt, %g2
3793	cmp	%g2, FAST_PROT_TT
3794	mov	PTL1_BAD_KPROT_FAULT, %g1
3795	movne	%icc, PTL1_BAD_KMISS, %g1
3796	ba,pt	%icc, ptl1_panic
3797	  nop
3798
37992:
3800	/*
3801	 * We are taking a pagefault in the kernel on a kernel address.  If
3802	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
3803	 * want to call sfmmu_pagefault -- we will instead note that a fault
3804	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
3805	 * (instead of a "retry").  This will step over the faulting
3806	 * instruction.
3807	 */
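	/*
	 * Roughly (sketch only):
	 *
	 *	if (cpu_core[cpuid].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) {
	 *		cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *		cpuc_dtrace_illval = fault address;
	 *		done;
	 *	} else
	 *		sfmmu_pagefault();
	 */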
3808	CPU_INDEX(%g1, %g2)
3809	set	cpu_core, %g2
3810	sllx	%g1, CPU_CORE_SHIFT, %g1
3811	add	%g1, %g2, %g1
3812	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3813	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3814	bz	sfmmu_pagefault
3815	or	%g2, CPU_DTRACE_BADADDR, %g2
3816	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3817	GET_MMU_D_ADDR(%g3, %g4)
3818	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3819	done
3820
38213:
3822	cmp	%g5, 1
3823	ble,pt	%icc, 4f
3824	  nop
3825	TSTAT_CHECK_TL1(4f, %g1, %g2)
3826	ba,pt	%icc, sfmmu_window_trap
3827	  nop
3828
38294:
3830	/*
3831	 * We are taking a pagefault on a non-kernel address.  If we are in
3832	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3833	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3834	 */
3835	CPU_INDEX(%g1, %g2)
3836	set	cpu_core, %g2
3837	sllx	%g1, CPU_CORE_SHIFT, %g1
3838	add	%g1, %g2, %g1
3839	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3840	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3841	bz	sfmmu_mmu_trap
3842	or	%g2, CPU_DTRACE_BADADDR, %g2
3843	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3844	GET_MMU_D_ADDR(%g3, %g4)
3845	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3846
3847	/*
3848	 * Be sure that we're actually taking this miss from the kernel --
3849	 * otherwise we have managed to return to user-level with
3850	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3851	 */
3852	rdpr	%tstate, %g2
3853	btst	TSTATE_PRIV, %g2
3854	bz,a	ptl1_panic
3855	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3856	done
3857
3858	ALTENTRY(tsb_tl0_noctxt)
3859	/*
3860	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
3861	 * if it is, indicate that we have faulted and issue a "done".
3862	 */
3863	CPU_INDEX(%g5, %g6)
3864	set	cpu_core, %g6
3865	sllx	%g5, CPU_CORE_SHIFT, %g5
3866	add	%g5, %g6, %g5
3867	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
3868	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
3869	bz	1f
3870	or	%g6, CPU_DTRACE_BADADDR, %g6
3871	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
3872	GET_MMU_D_ADDR(%g3, %g4)
3873	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
3874
3875	/*
3876	 * Be sure that we're actually taking this miss from the kernel --
3877	 * otherwise we have managed to return to user-level with
3878	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3879	 */
3880	rdpr	%tstate, %g5
3881	btst	TSTATE_PRIV, %g5
3882	bz,a	ptl1_panic
3883	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3884	TSTAT_CHECK_TL1(2f, %g1, %g2)
38852:
3886	done
3887
38881:
3889	rdpr	%tt, %g5
3890	cmp	%g5, FAST_IMMU_MISS_TT
3891#ifdef sun4v
3892	MMU_FAULT_STATUS_AREA(%g2)
3893	be,a,pt	%icc, 2f
3894	  ldx	[%g2 + MMFSA_I_CTX], %g3
3895	cmp	%g5, T_INSTR_MMU_MISS
3896	be,a,pt	%icc, 2f
3897	  ldx	[%g2 + MMFSA_I_CTX], %g3
3898	ldx	[%g2 + MMFSA_D_CTX], %g3
38992:
3900#else
3901	mov	MMU_TAG_ACCESS, %g2
3902	be,a,pt	%icc, 2f
3903	  ldxa	[%g2]ASI_IMMU, %g3
3904	ldxa	[%g2]ASI_DMMU, %g3
39052:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
3906#endif /* sun4v */
3907	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
3908	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
3909	rdpr	%tl, %g5
3910	cmp	%g5, 1
3911	ble,pt	%icc, sfmmu_mmu_trap
3912	  nop
3913	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3914	ba,pt	%icc, sfmmu_window_trap
3915	  nop
3916	SET_SIZE(sfmmu_tsb_miss)
3917#endif  /* lint */
3918
3919#if defined (lint)
3920/*
3921 * This routine will look for a user or kernel vaddr in the hash
3922 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
3923 * grab any locks.  It should only be used by other sfmmu routines.
3924 */
3925/* ARGSUSED */
3926pfn_t
3927sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3928{
3929	return(0);
3930}
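/*
 * Typical use (illustrative only; callers and error handling vary):
 *
 *	tte_t tte;
 *	pfn_t pfn = sfmmu_vatopfn(vaddr, ksfmmup, &tte);
 *	if (pfn == PFN_INVALID)
 *		... no valid kernel mapping for vaddr ...
 *
 * Note the assembly version panics if sfmmup is not ksfmmup.
 */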
3931
3932/* ARGSUSED */
3933pfn_t
3934sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
3935{
3936	return(0);
3937}
3938
3939#else /* lint */
3940
3941	ENTRY_NP(sfmmu_vatopfn)
3942 	/*
3943 	 * disable interrupts
3944 	 */
3945 	rdpr	%pstate, %o3
3946#ifdef DEBUG
3947	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
3948#endif
3949	/*
3950	 * disable interrupts to protect the TSBMISS area
3951	 */
3952	andn    %o3, PSTATE_IE, %o5
3953	wrpr    %o5, 0, %pstate
3954
3955	/*
3956	 * o0 = vaddr
3957	 * o1 = sfmmup
3958	 * o2 = ttep
3959	 */
3960	CPU_TSBMISS_AREA(%g1, %o5)
3961	ldn	[%g1 + TSBMISS_KHATID], %o4
3962	cmp	%o4, %o1
3963	bne,pn	%ncc, vatopfn_nokernel
3964	  mov	TTE64K, %g5			/* g5 = rehash # */
3965	mov	%g1, %o5			/* o5 = tsbmiss_area */
3966	/*
3967	 * o0 = vaddr
3968	 * o1 & o4 = hatid
3969	 * o2 = ttep
3970	 * o5 = tsbmiss area
3971	 */
3972	mov	HBLK_RANGE_SHIFT, %g6
39731:
3974
3975	/*
3976	 * o0 = vaddr
3977	 * o1 = sfmmup
3978	 * o2 = ttep
3979	 * o3 = old %pstate
3980	 * o4 = hatid
3981	 * o5 = tsbmiss
3982	 * g5 = rehash #
3983	 * g6 = hmeshift
3984	 *
3985	 * The first arg to GET_TTE is actually the tag access register,
3986	 * not just the vaddr.  Since this call is for the kernel we need to
3987	 * clear any lower vaddr bits that would be interpreted as ctx bits.
3988	 */
3989	set     TAGACC_CTX_MASK, %g1
3990	andn    %o0, %g1, %o0
3991	GET_TTE(%o0, %o4, %g1, %g2, %o5, %g4, %g6, %g5, %g3,
3992		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
3993
3994kvtop_hblk_found:
3995	/*
3996	 * o0 = vaddr
3997	 * o1 = sfmmup (hat id)
3998	 * o2 = ttep
3999	 * o5 = tsbmiss area
4000	 * g1 = tte
4001	 * g2 = tte pa
4002	 * g3 = scratch
4004	 */
4005	brgez,a,pn %g1, 6f			/* if tte invalid, return PFN_INVALID */
4006	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4007	stx	%g1, [%o2]			/* put tte into *ttep */
4008	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
4009	/*
4010	 * o0 = vaddr
4011	 * o1 = sfmmup
4012	 * o2 = ttep
4013	 * g1 = pfn
4014	 */
4015	ba,pt	%xcc, 6f
4016	  mov	%g1, %o0
4017
4018kvtop_nohblk:
4019	/*
4020	 * We get here if we couldn't find a valid hblk in the hash.  We
4021	 * rehash if necessary.
4022	 */
4023	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
4024#ifdef sun4v
4025	cmp	%g5, MAX_HASHCNT
4026#else
4027	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
4028#endif /* sun4v */
4029	be,a,pn	%icc, 6f
4030	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4031	mov	%o1, %o4			/* restore hatid */
4032#ifdef sun4v
4033	add	%g5, 2, %g5
4034	cmp	%g5, 3
4035	move	%icc, MMU_PAGESHIFT4M, %g6
4036	ba,pt	%icc, 1b
4037	movne	%icc, MMU_PAGESHIFT256M, %g6
4038#else
4039	inc	%g5
4040	cmp	%g5, 2
4041	move	%icc, MMU_PAGESHIFT512K, %g6
4042	ba,pt	%icc, 1b
4043	movne	%icc, MMU_PAGESHIFT4M, %g6
4044#endif /* sun4v */
40456:
4046	retl
4047 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4048
4049tsb_suspend:
4050	/*
4051	 * o0 = vaddr
4052	 * o1 = sfmmup
4053	 * o2 = ttep
4054	 * g1 = tte
4055	 * g2 = tte pa
4056	 * g3 = tte va
4057	 * o5 = tsbmiss area
4058	 */
4059	stx %g1,[%o2]				/* put tte into *ttep */
4060	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
4061	  sub	%g0, 1, %o0			/* output = PFN_INVALID */
4062	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
40638:
4064	retl
4065	 wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4066
4067vatopfn_nokernel:
4068	/*
4069	 * This routine does NOT support user addresses.
4070	 * There is a routine in C that supports this.
4071	 * The only reason why we don't have the C routine support
4072	 * kernel addresses as well is because we do va_to_pa while
4073	 * holding the hashlock.
4074	 */
4075 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4076	save	%sp, -SA(MINFRAME), %sp
4077	sethi	%hi(sfmmu_panic3), %o0
4078	call	panic
4079	 or	%o0, %lo(sfmmu_panic3), %o0
4080
4081	SET_SIZE(sfmmu_vatopfn)
4082
4083	/*
4084	 * %o0 = vaddr
4085	 * %o1 = hashno (aka szc)
4086	 *
4088	 * This routine is similar to sfmmu_vatopfn() but will only look for
4089	 * a kernel vaddr in the hash structure for the specified rehash value.
4090	 * It's just an optimization for the case when pagesize for a given
4091	 * va range is already known (e.g. large page heap) and we don't want
4092	 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
4093	 *
4094	 * Returns a valid pfn, or PFN_INVALID if the tte for the specified
4095	 * rehash # is not found, is invalid, or is suspended.
4096	 */
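	/*
	 * The sll/add sequence below computes
	 * hmeshift = 3 * hashno + MMU_PAGESHIFT, e.g. hashno 1 -> 16
	 * (64K hblks) and hashno 3 -> 22 (4M hblks).
	 */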
4097	ENTRY_NP(sfmmu_kvaszc2pfn)
4098 	/*
4099 	 * disable interrupts
4100 	 */
4101 	rdpr	%pstate, %o3
4102#ifdef DEBUG
4103	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
4104#endif
4105	/*
4106	 * disable interrupts to protect the TSBMISS area
4107	 */
4108	andn    %o3, PSTATE_IE, %o5
4109	wrpr    %o5, 0, %pstate
4110
4111	CPU_TSBMISS_AREA(%g1, %o5)
4112	ldn	[%g1 + TSBMISS_KHATID], %o4
4113	sll	%o1, 1, %g6			/* %g6 = 2 * hashno */
4114	add	%g6, %o1, %g6			/* %g6 = 3 * hashno */
4115	add	%g6, MMU_PAGESHIFT, %g6		/* %g6 = hmeshift */
4116	/*
4117	 * %o0 = vaddr
4118	 * %o1 = hashno
4119	 * %o3 = old %pstate
4120	 * %o4 = ksfmmup
4121	 * %g1 = tsbmiss area
4122	 * %g6 = hmeshift
4123	 */
4124
4125	/*
4126	 * The first arg to GET_TTE is actually the tag access register,
4127	 * not just the vaddr.  Since this call is for the kernel we need to
4128	 * clear any lower vaddr bits that would be interpreted as ctx bits.
4129	 */
4130	srlx	%o0, MMU_PAGESHIFT, %o0
4131	sllx	%o0, MMU_PAGESHIFT, %o0
4132	GET_TTE(%o0, %o4, %g3, %g4, %g1, %o5, %g6, %o1, %g5,
4133		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
4134		kvaszc2pfn_nohblk)
4135
4136kvaszc2pfn_hblk_found:
4137	/*
4138	 * %g3 = tte
4139	 * %o0 = vaddr
4140	 */
4141	brgez,a,pn %g3, 1f			/* check if tte is invalid */
4142	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4143	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
4144	/*
4145	 * g3 = pfn
4146	 */
4147	ba,pt	%xcc, 1f
4148	  mov	%g3, %o0
4149
4150kvaszc2pfn_nohblk:
4151	mov	-1, %o0
4152
41531:
4154	retl
4155 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4156
4157	SET_SIZE(sfmmu_kvaszc2pfn)
4158
4159#endif /* lint */
4160
4163#if !defined(lint)
4164
4165/*
4166 * kpm lock used between trap level tsbmiss handler and kpm C level.
4167 */
4168#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
4169	mov     0xff, tmp1						;\
4170label1:									;\
4171	casa    [kpmlckp]asi, %g0, tmp1					;\
4172	brnz,pn tmp1, label1						;\
4173	mov     0xff, tmp1						;\
4174	membar  #LoadLoad
4175
4176#define KPMLOCK_EXIT(kpmlckp, asi)					\
4177	membar  #LoadStore|#StoreStore					;\
4178	sta     %g0, [kpmlckp]asi
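/*
 * In rough C terms (sketch only; casa compares the word at kpmlckp
 * with zero and, if it is zero, stores 0xff there, always returning
 * the old value):
 *
 *	KPMLOCK_ENTER:	while (cas(kpmlckp, 0, 0xff) != 0)
 *				continue;
 *			membar #LoadLoad;
 *	KPMLOCK_EXIT:	membar #LoadStore|#StoreStore;
 *			*kpmlckp = 0;
 */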
4179
4180/*
4181 * Lookup a memseg for a given pfn and if found, return the physical
4182 * address of the corresponding struct memseg in mseg, otherwise
4183 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
4184 * tsbmp, %asi is assumed to be ASI_MEM.
4185 * This lookup is done by strictly traversing only the physical memseg
4186 * linkage. The more generic approach, to check the virtual linkage
4187 * before using the physical (used e.g. with hmehash buckets), cannot
4188 * be used here.  Memory DR operations can run in parallel with this
4189 * lookup without any locks, and updates of the physical and virtual
4190 * linkage cannot be done atomically with respect to each other.
4191 * Because physical address zero can be a valid physical address,
4192 * MSEG_NULLPTR_PA acts as the "physical NULL" pointer.
4193 */
4194#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
4195	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
4196	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
4197	udivx	pfn, mseg, mseg						;\
4198	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
4199	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
4200	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
4201	add	tmp1, mseg, tmp1					;\
4202	ldxa	[tmp1]%asi, mseg					;\
4203	cmp	mseg, MSEG_NULLPTR_PA					;\
4204	be,pn	%xcc, label/**/1		/* if not found */	;\
4205	  nop								;\
4206	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4207	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4208	blu,pn	%xcc, label/**/1					;\
4209	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4210	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4211	bgeu,pn	%xcc, label/**/1					;\
4212	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
4213	mulx	tmp1, PAGE_SIZE, tmp1					;\
4214	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
4215	add	tmp2, tmp1, tmp1			/* pp */	;\
4216	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
4217	cmp	tmp2, pfn						;\
4218	be,pt	%xcc, label/**/_ok			/* found */	;\
4219label/**/1:								;\
4220	/* brute force lookup */					;\
4221	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
4222	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
4223label/**/2:								;\
4224	cmp	mseg, MSEG_NULLPTR_PA					;\
4225	be,pn	%xcc, label/**/_ok		/* if not found */	;\
4226	  nop								;\
4227	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4228	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4229	blu,a,pt %xcc, label/**/2					;\
4230	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4231	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4232	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4233	bgeu,a,pt %xcc, label/**/2					;\
4234	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4235label/**/_ok:
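/*
 * A rough C rendering of the macro above (sketch only; the real code
 * reads all fields through their *_PA offsets with ASI_MEM, and the
 * field names here are abbreviated):
 *
 *	mseg = msegphash[(pfn / mhash_per_slot) & (SFMMU_N_MEM_SLOTS - 1)];
 *	if (mseg != MSEG_NULLPTR_PA &&
 *	    pfn >= mseg->pages_base && pfn < mseg->pages_end &&
 *	    mseg->pages[pfn - mseg->pages_base].p_pagenum == pfn)
 *		return (mseg);
 *	for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA; mseg = mseg->nextpa)
 *		if (pfn >= mseg->pages_base && pfn < mseg->pages_end)
 *			break;
 *	return (mseg);		MSEG_NULLPTR_PA if the walk found nothing
 */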
4236
4237	/*
4238	 * kpm tsb miss handler for large pages
4239	 * g1 = 8K kpm TSB entry pointer
4240	 * g2 = tag access register
4241	 * g3 = 4M kpm TSB entry pointer
4242	 */
4243	ALTENTRY(sfmmu_kpm_dtsb_miss)
4244	TT_TRACE(trace_tsbmiss)
4245
4246	CPU_INDEX(%g7, %g6)
4247	sethi	%hi(kpmtsbm_area), %g6
4248	sllx	%g7, KPMTSBM_SHIFT, %g7
4249	or	%g6, %lo(kpmtsbm_area), %g6
4250	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4251
4252	/* check enable flag */
4253	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4254	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4255	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4256	  nop
4257
4258	/* VA range check */
4259	ldx	[%g6 + KPMTSBM_VBASE], %g7
4260	cmp	%g2, %g7
4261	blu,pn	%xcc, sfmmu_tsb_miss
4262	  ldx	[%g6 + KPMTSBM_VEND], %g5
4263	cmp	%g2, %g5
4264	bgeu,pn	%xcc, sfmmu_tsb_miss
4265	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
4266
4267	/*
4268	 * check TL tsbmiss handling flag
4269	 * bump tsbmiss counter
4270	 */
4271	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4272#ifdef	DEBUG
4273	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
4274	inc	%g5
4275	brz,pn	%g3, sfmmu_kpm_exception
4276	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4277#else
4278	inc	%g5
4279	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4280#endif
4281	/*
4282	 * At this point:
4283	 *  g1 = 8K kpm TSB pointer (not used)
4284	 *  g2 = tag access register
4285	 *  g3 = clobbered
4286	 *  g6 = per-CPU kpm tsbmiss area
4287	 *  g7 = kpm_vbase
4288	 */
4289
4290	/* vaddr2pfn */
4291	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4292	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4293	srax    %g4, %g3, %g2			/* which alias range (r) */
4294	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4295	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4296
4297	/*
4298	 * Setup %asi
4299	 * mseg_pa = page_numtomemseg_nolock(pfn)
4300	 * if (mseg_pa == NULL) sfmmu_kpm_exception
4301	 * g2=pfn
4302	 */
4303	mov	ASI_MEM, %asi
4304	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
4305	cmp	%g3, MSEG_NULLPTR_PA
4306	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4307	  nop
4308
4309	/*
4310	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
4311	 * g2=pfn g3=mseg_pa
4312	 */
4313	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
4314	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4315	srlx	%g2, %g5, %g4			/* ptopkpmp(pfn) */
4316	sllx	%g4, %g5, %g4			/* kpmptop(ptopkpmp(pfn)) */
4317	sub	%g4, %g7, %g4			/* - mseg_pa->kpm_pbase */
4318	srlx	%g4, %g5, %g4			/* ptokpmp(...) = inx */
4319
4320	/*
4321	 * Validate inx value
4322	 * g2=pfn g3=mseg_pa g4=inx
4323	 */
4324#ifdef	DEBUG
4325	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4326	cmp	%g4, %g5			/* inx - nkpmpgs */
4327	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4328	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4329#else
4330	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4331#endif
4332	/*
4333	 * kp = &mseg_pa->kpm_pages[inx]
4334	 */
4335	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
4336	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
4337	add	%g5, %g4, %g5			/* kp */
4338
4339	/*
4340	 * KPMP_HASH(kp)
4341	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
4342	 */
4343	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4344	sub	%g7, 1, %g7			/* mask */
4345	srlx	%g5, %g1, %g1			/* x = kp >> kpmp_shift */
4346	add	%g5, %g1, %g5			/* y = kp + x */
4347	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4348
4349	/*
4350	 * Calculate physical kpm_page pointer
4351	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4352	 */
4353	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
4354	add	%g1, %g4, %g1			/* kp_pa */
4355
4356	/*
4357	 * Calculate physical hash lock address
4358	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
4359	 */
4360	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
4361	sllx	%g5, KPMHLK_SHIFT, %g5
4362	add	%g4, %g5, %g3
4363	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
4364
4365	/*
4366	 * Assemble tte
4367	 * g1=kp_pa g2=pfn g3=hlck_pa
4368	 */
4369#ifdef sun4v
4370	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4371	sllx	%g5, 32, %g5
4372	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4373	or	%g4, TTE4M, %g4
4374	or	%g5, %g4, %g5
4375#else
4376	sethi	%hi(TTE_VALID_INT), %g4
4377	mov	TTE4M, %g5
4378	sllx	%g5, TTE_SZ_SHFT_INT, %g5
4379	or	%g5, %g4, %g5			/* upper part */
4380	sllx	%g5, 32, %g5
4381	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4382	or	%g5, %g4, %g5
4383#endif
4384	sllx	%g2, MMU_PAGESHIFT, %g4
4385	or	%g5, %g4, %g5			/* tte */
4386	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4387	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4388
4389	/*
4390	 * tsb dropin
4391	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
4392	 */
4393
4394	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4395	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
4396
4397	/* use C-handler if there's no go for dropin */
4398	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
4399	cmp	%g7, -1
4400	bne,pn	%xcc, 5f		/* kp_refcntc != -1: no go */
4401	  nop
4402
4403#ifdef	DEBUG
4404	/* double check refcnt */
4405	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
4406	brz,pn	%g7, 5f			/* let C-handler deal with this */
4407	  nop
4408#endif
4409
4410#ifndef sun4v
4411	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4412	mov	ASI_N, %g1
4413	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4414	movnz	%icc, ASI_MEM, %g1
4415	mov	%g1, %asi
4416#endif
4417
4418	/*
4419	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4420	 * If we fail to lock the TSB entry then just load the tte into the
4421	 * TLB.
4422	 */
4423	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l1)
4424
4425	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4426	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4427locked_tsb_l1:
4428	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
4429
4430	/* KPMLOCK_EXIT(kpmlckp, asi) */
4431	KPMLOCK_EXIT(%g3, ASI_MEM)
4432
4433	/*
4434	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4435	 * point to trapstat's TSB miss return code (note that trapstat
4436	 * itself will patch the correct offset to add).
4437	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4438	 */
4439	rdpr	%tl, %g7
4440	cmp	%g7, 1
4441	ble	%icc, 0f
4442	sethi	%hi(KERNELBASE), %g6
4443	rdpr	%tpc, %g7
4444	or	%g6, %lo(KERNELBASE), %g6
4445	cmp	%g7, %g6
4446	bgeu	%xcc, 0f
4447	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
4448	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4449	wrpr	%g7, %tpc
4450	add	%g7, 4, %g7
4451	wrpr	%g7, %tnpc
44520:
4453	retry
44545:
4455	/* g3=hlck_pa */
4456	KPMLOCK_EXIT(%g3, ASI_MEM)
4457	ba,pt	%icc, sfmmu_kpm_exception
4458	  nop
4459	SET_SIZE(sfmmu_kpm_dtsb_miss)
4460
4461	/*
4462	 * kpm tsbmiss handler for small pages
4463	 * g1 = 8K kpm TSB pointer
4464	 * g2 = tag access register
4465	 * g3 = 4M kpm TSB pointer
4466	 */
4467	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
4468	TT_TRACE(trace_tsbmiss)
4469	CPU_INDEX(%g7, %g6)
4470	sethi	%hi(kpmtsbm_area), %g6
4471	sllx	%g7, KPMTSBM_SHIFT, %g7
4472	or	%g6, %lo(kpmtsbm_area), %g6
4473	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4474
4475	/* check enable flag */
4476	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4477	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4478	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4479	  nop
4480
4481	/*
4482	 * VA range check
4483	 * On fail: goto sfmmu_tsb_miss
4484	 */
4485	ldx	[%g6 + KPMTSBM_VBASE], %g7
4486	cmp	%g2, %g7
4487	blu,pn	%xcc, sfmmu_tsb_miss
4488	  ldx	[%g6 + KPMTSBM_VEND], %g5
4489	cmp	%g2, %g5
4490	bgeu,pn	%xcc, sfmmu_tsb_miss
4491	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
4492
4493	/*
4494	 * check TL tsbmiss handling flag
4495	 * bump tsbmiss counter
4496	 */
4497	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4498#ifdef	DEBUG
4499	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
4500	inc	%g5
4501	brz,pn	%g1, sfmmu_kpm_exception
4502	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4503#else
4504	inc	%g5
4505	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4506#endif
4507	/*
4508	 * At this point:
4509	 *  g1 = clobbered
4510	 *  g2 = tag access register
4511	 *  g3 = 4M kpm TSB pointer (not used)
4512	 *  g6 = per-CPU kpm tsbmiss area
4513	 *  g7 = kpm_vbase
4514	 */
4515
4516	/*
4517	 * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
4518	 * which is defined in mach_kpm.h. Any changes in that macro
4519	 * should also be ported back to this assembly code.
4520	 */
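	/*
	 * For reference, a C rendering of the code below (sketch only;
	 * the authoritative version is SFMMU_KPM_VTOP in mach_kpm.h):
	 *
	 *	paddr = vaddr - kpm_vbase;
	 *	if ((r = paddr >> kpm_size_shift) != 0) {
	 *		v = (vaddr >> MMU_PAGESHIFT) & vac_colors_mask;
	 *		paddr -= r << kpm_size_shift;
	 *		if (r > v)
	 *			paddr += (r - v) << MMU_PAGESHIFT;
	 *		else
	 *			paddr -= r << MMU_PAGESHIFT;
	 *	}
	 */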
4521	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3	/* g3 = kpm_size_shift */
4522	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4523	srax    %g4, %g3, %g7			/* which alias range (r) */
4524	brz,pt	%g7, 2f
4525	  sethi   %hi(vac_colors_mask), %g5
4526	ld	[%g5 + %lo(vac_colors_mask)], %g5
4527
4528	srlx	%g2, MMU_PAGESHIFT, %g1		/* vaddr >> MMU_PAGESHIFT */
4529	and	%g1, %g5, %g1			/* g1 = v */
4530	sllx	%g7, %g3, %g5			/* g5 = r << kpm_size_shift */
4531	cmp	%g7, %g1			/* if (r > v) */
4532	bleu,pn %xcc, 1f
4533	  sub   %g4, %g5, %g4			/* paddr -= r << kpm_size_shift */
4534	sub	%g7, %g1, %g5			/* g5 = r - v */
4535	sllx	%g5, MMU_PAGESHIFT, %g7		/* (r-v) << MMU_PAGESHIFT */
4536	add	%g4, %g7, %g4			/* paddr += (r-v)<<MMU_PAGESHIFT */
4537	ba	2f
4538	  nop
45391:
4540	sllx	%g7, MMU_PAGESHIFT, %g5		/* else */
4541	sub	%g4, %g5, %g4			/* paddr -= r << MMU_PAGESHIFT */
4542
4543	/*
4544	 * paddr2pfn
4545	 *  g1 = vcolor (not used)
4546	 *  g2 = tag access register
4547	 *  g3 = clobbered
4548	 *  g4 = paddr
4549	 *  g5 = clobbered
4550	 *  g6 = per-CPU kpm tsbmiss area
4551	 *  g7 = clobbered
4552	 */
45532:
4554	srlx	%g4, MMU_PAGESHIFT, %g2		/* g2 = pfn */
4555
4556	/*
4557	 * Setup %asi
4558	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
4559	 * if (mseg not found) sfmmu_kpm_exception
4560	 * g2=pfn g6=per-CPU kpm tsbmiss area
4561	 * g4 g5 g7 for scratch use.
4562	 */
4563	mov	ASI_MEM, %asi
4564	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
4565	cmp	%g3, MSEG_NULLPTR_PA
4566	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4567	  nop
4568
4569	/*
4570	 * inx = pfn - mseg_pa->kpm_pbase
4571	 * g2=pfn  g3=mseg_pa  g6=per-CPU kpm tsbmiss area
4572	 */
4573	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4574	sub	%g2, %g7, %g4
4575
4576#ifdef	DEBUG
4577	/*
4578	 * Validate inx value
4579	 * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
4580	 */
4581	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4582	cmp	%g4, %g5			/* inx - nkpmpgs */
4583	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4584	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4585#else
4586	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4587#endif
4588	/* ksp = &mseg_pa->kpm_spages[inx] */
4589	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
4590	add	%g5, %g4, %g5			/* ksp */
4591
4592	/*
4593	 * KPMP_SHASH(ksp)
4594	 * g2=pfn g3=mseg_pa g4=inx g5=ksp
4595	 * g6=per-CPU kpm tsbmiss area  g7=kpmp_stable_sz
4596	 */
4597	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4598	sub	%g7, 1, %g7			/* mask */
4599	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
4600	add	%g5, %g1, %g5			/* y = ksp + x */
4601	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4602
4603	/*
4604	 * Calculate physical kpm_spage pointer
4605	 * g2=pfn g3=mseg_pa g4=inx g5=hashinx
4606	 * g6=per-CPU kpm tsbmiss area
4607	 */
4608	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
4609	add	%g1, %g4, %g1			/* ksp_pa */
4610
4611	/*
4612	 * Calculate physical hash lock address.
4613	 * Note: Changes in kpm_shlk_t must be reflected here.
4614	 * g1=ksp_pa g2=pfn g5=hashinx
4615	 * g6=per-CPU kpm tsbmiss area
4616	 */
4617	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
4618	sllx	%g5, KPMSHLK_SHIFT, %g5
4619	add	%g4, %g5, %g3			/* hlck_pa */
4620
4621	/*
4622	 * Assemble non-cacheable tte initially
4623	 * g1=ksp_pa g2=pfn g3=hlck_pa
4624	 * g6=per-CPU kpm tsbmiss area
4625	 */
4626	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4627	sllx	%g5, 32, %g5
4628	mov	(TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4629	or	%g5, %g4, %g5
4630	sllx	%g2, MMU_PAGESHIFT, %g4
4631	or	%g5, %g4, %g5			/* tte */
4632	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4633	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4634
4635	/*
4636	 * tsb dropin
4637	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
4638	 * g6=per-CPU kpm tsbmiss area  g7=scratch register
4639	 */
4640
4641	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4642	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
4643
4644	/* use C-handler if there's no go for dropin */
4645	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7	/* kp_mapped */
4646	andcc	%g7, KPM_MAPPED_GO, %g0			/* go or no go ? */
4647	bz,pt	%icc, 5f				/* no go */
4648	  nop
4649	and	%g7, KPM_MAPPED_MASK, %g7		/* go */
4650	cmp	%g7, KPM_MAPPEDS			/* cacheable ? */
4651	be,a,pn	%xcc, 3f
4652	  or	%g5, TTE_CV_INT, %g5			/* cacheable */
46533:
4654#ifndef sun4v
4655	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4656	mov	ASI_N, %g1
4657	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4658	movnz	%icc, ASI_MEM, %g1
4659	mov	%g1, %asi
4660#endif
4661
4662	/*
4663	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4664	 * If we fail to lock the TSB entry then just load the tte into the
4665	 * TLB.
4666	 */
4667	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l2)
4668
4669	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4670	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4671locked_tsb_l2:
4672	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
4673
4674	/* KPMLOCK_EXIT(kpmlckp, asi) */
4675	KPMLOCK_EXIT(%g3, ASI_MEM)
4676
4677	/*
4678	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4679	 * point to trapstat's TSB miss return code (note that trapstat
4680	 * itself will patch the correct offset to add).
4681	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4682	 */
4683	rdpr	%tl, %g7
4684	cmp	%g7, 1
4685	ble	%icc, 0f
4686	sethi	%hi(KERNELBASE), %g6
4687	rdpr	%tpc, %g7
4688	or	%g6, %lo(KERNELBASE), %g6
4689	cmp	%g7, %g6
4690	bgeu	%xcc, 0f
4691	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4692	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4693	wrpr	%g7, %tpc
4694	add	%g7, 4, %g7
4695	wrpr	%g7, %tnpc
46960:
4697	retry
46985:
4699	/* g3=hlck_pa */
4700	KPMLOCK_EXIT(%g3, ASI_MEM)
4701	ba,pt	%icc, sfmmu_kpm_exception
4702	  nop
4703	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4704
4705#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4706#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4707#endif
4708
4709#endif /* lint */
4710
4711#ifdef	lint
4712/*
4713 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4714 * Called from C-level; sets/clears the "go" indication for the trap
4715 * level handler.  khl_lock is a low-level spin lock to protect the
4716 * kp_tsbmtl field.  It is assumed that &kp->kp_refcntc is checked for
4717 * zero or -1 at C-level, and that khl_mutex is held when called.
4718 */
4719/* ARGSUSED */
4720void
4721sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4722{
4723}
4724
4725/*
4726 * For kpm_smallpages: stores val to the byte at address "mapped",
4727 * within low-level lock brackets; the old value is returned.
4728 * Called from C-level.
4729 */
4730/* ARGSUSED */
4731int
4732sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
4733{
4734	return (0);
4735}
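/*
 * The assembly version below is, in essence (sketch only):
 *
 *	KPMLOCK_ENTER(kshl_lock);
 *	old = *mapped;
 *	*mapped = val;
 *	KPMLOCK_EXIT(kshl_lock);
 *	return (old & KPM_MAPPED_MASK);
 */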
4736
4737#else /* lint */
4738
4739	.seg	".data"
4740sfmmu_kpm_tsbmtl_panic:
4741	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
4742	.byte	0
4743sfmmu_kpm_stsbmtl_panic:
4744	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
4745	.byte	0
4746	.align	4
4747	.seg	".text"
4748
4749	ENTRY_NP(sfmmu_kpm_tsbmtl)
4750	rdpr	%pstate, %o3
4751	/*
4752	 * %o0 = &kp_refcntc
4753	 * %o1 = &khl_lock
4754	 * %o2 = 0/1 (off/on)
4755	 * %o3 = pstate save
4756	 */
4757#ifdef DEBUG
4758	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4759	bnz,pt %icc, 1f				/* disabled, panic	 */
4760	  nop
4761	save	%sp, -SA(MINFRAME), %sp
4762	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
4763	call	panic
4764	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4765	ret
4766	restore
47671:
4768#endif /* DEBUG */
4769	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4770
4771	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4772	mov	-1, %o5				/* assume on (cmd != 0) */
4773	brz,a	%o2, 2f
4774	  mov	0, %o5				/* cmd == 0: off */
47752:
4776	sth	%o5, [%o0]
4777	KPMLOCK_EXIT(%o1, ASI_N)
4778
4779	retl
4780	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4781	SET_SIZE(sfmmu_kpm_tsbmtl)
4782
4783	ENTRY_NP(sfmmu_kpm_stsbmtl)
4784	rdpr	%pstate, %o3
4785	/*
4786	 * %o0 = &mapped
4787	 * %o1 = &kshl_lock
4788	 * %o2 = val
4789	 * %o3 = pstate save
4790	 */
4791#ifdef DEBUG
4792	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4793	bnz,pt %icc, 1f				/* disabled, panic	 */
4794	  nop
4795	save	%sp, -SA(MINFRAME), %sp
4796	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
4797	call	panic
4798	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4799	ret
4800	restore
48011:
4802#endif /* DEBUG */
4803	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4804
4805	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4806	ldsb	[%o0], %o5
4807	stb	%o2, [%o0]
4808	KPMLOCK_EXIT(%o1, ASI_N)
4809
4810	and	%o5, KPM_MAPPED_MASK, %o0	/* return old val */
4811	retl
4812	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4813	SET_SIZE(sfmmu_kpm_stsbmtl)
4814
4815#endif /* lint */
4816
4817#ifndef lint
4818#ifdef sun4v
4819	/*
4820	 * User/kernel data miss w/ multiple TSBs
4821	 * The first probe covers 8K, 64K, and 512K page sizes,
4822	 * because 64K and 512K mappings are replicated off the 8K
4823	 * pointer.  The second probe covers the 4M page size only.
4824	 *
4825	 * MMU fault area contains miss address and context.
4826	 */
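	/*
	 * Conceptually each probe indexes a direct-mapped TSB (sketch
	 * only; nentries1/nentries2 are the respective TSB entry counts):
	 *
	 *	tsbe1 = tsb8k_base + ((va >> MMU_PAGESHIFT) &
	 *	    (nentries1 - 1)) * sizeof (struct tsbe);
	 *	tsbe2 = tsb4m_base + ((va >> MMU_PAGESHIFT4M) &
	 *	    (nentries2 - 1)) * sizeof (struct tsbe);
	 */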
4827	ALTENTRY(sfmmu_slow_dmmu_miss)
4828	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
4829
4830slow_miss_common:
4831	/*
4832	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4833	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
4834	 */
4835	brnz,pt	%g3, 8f			! check for user context
4836	  nop
4837
4838	/*
4839	 * Kernel miss
4840	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
4841	 * branch to sfmmu_tsb_miss_tt to handle it.
4842	 */
4843	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4844sfmmu_dslow_patch_ktsb_base:
4845	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
4846sfmmu_dslow_patch_ktsb_szcode:
4847	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
4848
4849	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
4850	! %g1 = First TSB entry pointer, as TSB miss handler expects
4851
4852	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4853sfmmu_dslow_patch_ktsb4m_base:
4854	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
4855sfmmu_dslow_patch_ktsb4m_szcode:
4856	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
4857
4858	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
4859	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
4860	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4861	.empty
4862
48638:
4864	/*
4865	 * User miss
4866	 * Get first TSB pointer in %g1
4867	 * Get second TSB pointer (or NULL if no second TSB) in %g3
4868	 * Branch to sfmmu_tsb_miss_tt to handle it
4869	 */
4870	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
4871	/* %g1 = first TSB entry ptr now, %g2 preserved */
4872
4873	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
4874	brlz,pt %g3, sfmmu_tsb_miss_tt		/* done if no 2nd TSB */
4875	  nop
4876
4877	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
4878	/* %g3 = second TSB entry ptr now, %g2 preserved */
48799:
4880	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4881	.empty
4882	SET_SIZE(sfmmu_slow_dmmu_miss)
4883
4884
4885	/*
4886	 * User/kernel instruction miss w/ multiple TSBs
4887	 * The first probe covers 8K, 64K, and 512K page sizes,
4888	 * because 64K and 512K mappings are replicated off the 8K
4889	 * pointer.  The second probe covers the 4M page size only.
4890	 *
4891	 * MMU fault area contains miss address and context.
4892	 */
4893	ALTENTRY(sfmmu_slow_immu_miss)
4894	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4895	ba,a,pt	%xcc, slow_miss_common
4896	SET_SIZE(sfmmu_slow_immu_miss)
4897
4898#endif /* sun4v */
4899#endif	/* lint */
4900
4901#ifndef lint
4902
4903/*
4904 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4905 */
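/*
 * Each CPU locates its area by simple indexing (sketch; the handlers
 * compute this with CPU_INDEX and a shift or multiply):
 *
 *	tsbmissp = tsbmiss_area + CPU->cpu_id * TSBMISS_SIZE;
 *	kpmtsbmp = kpmtsbm_area + CPU->cpu_id * KPMTSBM_SIZE;
 */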
4906	.seg	".data"
4907	.align	64
4908	.global tsbmiss_area
4909tsbmiss_area:
4910	.skip	(TSBMISS_SIZE * NCPU)
4911
4912	.align	64
4913	.global kpmtsbm_area
4914kpmtsbm_area:
4915	.skip	(KPMTSBM_SIZE * NCPU)
4916#endif	/* lint */
4917