xref: /titanic_50/usr/src/uts/sfmmu/ml/sfmmu_asm.s (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident	"%Z%%M%	%I%	%E% SMI"
28
29/*
30 * SFMMU primitives.  These primitives should only be used by sfmmu
31 * routines.
32 */
33
34#if defined(lint)
35#include <sys/types.h>
36#else	/* lint */
37#include "assym.h"
38#endif	/* lint */
39
40#include <sys/asm_linkage.h>
41#include <sys/machtrap.h>
42#include <sys/machasi.h>
43#include <sys/sun4asi.h>
44#include <sys/pte.h>
45#include <sys/mmu.h>
46#include <vm/hat_sfmmu.h>
47#include <vm/seg_spt.h>
48#include <sys/machparam.h>
49#include <sys/privregs.h>
50#include <sys/scb.h>
51#include <sys/intreg.h>
52#include <sys/machthread.h>
53#include <sys/intr.h>
54#include <sys/clock.h>
55#include <sys/trapstat.h>
56
57#ifdef TRAPTRACE
58#include <sys/traptrace.h>
59
60/*
61 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
62 */
63#define	TT_TRACE(label)		\
64	ba	label		;\
65	rd	%pc, %g7
66#else
67
68#define	TT_TRACE(label)
69
70#endif /* TRAPTRACE */
71
72#ifndef	lint
73
74#if (TTE_SUSPEND_SHIFT > 0)
75#define	TTE_SUSPEND_INT_SHIFT(reg)				\
76	sllx	reg, TTE_SUSPEND_SHIFT, reg
77#else
78#define	TTE_SUSPEND_INT_SHIFT(reg)
79#endif
80
81#endif /* lint */
82
83#ifndef	lint
84
85/*
86 * Assumes TSBE_TAG is 0
87 * Assumes TSBE_INTHI is 0
88 * Assumes TSBREG.split is 0
89 */
90
91#if TSBE_TAG != 0
92#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
93#endif
94
95#if TSBTAG_INTHI != 0
96#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
97#endif
98
99/*
100 * The following code assumes the tsb is not split.
101 *
102 * With TSBs no longer shared between processes, it's no longer
103 * necessary to hash the context bits into the tsb index to get
104 * tsb coloring; the new implementation treats the TSB as a
105 * direct-mapped, virtually-addressed cache.
106 *
107 * In:
108 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
109 *    tsbbase = base address of TSB (clobbered)
110 *    tagacc = tag access register (clobbered)
111 *    szc = size code of TSB (ro)
112 *    tmp = scratch reg
113 * Out:
114 *    tsbbase = pointer to entry in TSB
115 */
116#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
117	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
118	srlx	tagacc, vpshift, tagacc 				;\
119	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
120	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
121	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
122	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
123	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
124
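/*
 * For reference, the index computation GET_TSBE_POINTER performs maps onto
 * the following C sketch.  The sk_ names and the two constants are
 * illustrative assumptions, not values taken from the kernel headers.
 *
 *	#include <stdint.h>
 *
 *	#define	SK_TSB0_ENTRIES	512	// assumed entries in a size-0 TSB
 *	#define	SK_TSBE_SHIFT	4	// assumed log2 of the TSB entry size
 *
 *	// byte offset of the TSB entry for (tag_access, vpshift, szc)
 *	static uint64_t
 *	sk_tsbe_offset(uint64_t tag_access, int vpshift, int szc)
 *	{
 *		uint64_t nentries = (uint64_t)SK_TSB0_ENTRIES << szc;
 *		uint64_t vpage = tag_access >> vpshift;	  // virtual page number
 *		uint64_t index = vpage & (nentries - 1);  // direct-mapped index
 *		return (index << SK_TSBE_SHIFT);  // entry number -> byte offset
 *	}
 *
 * The caller adds the returned offset to the TSB base, exactly as the
 * final add in the macro does.
 */
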
125/*
126 * When the kpm TSB is used it is assumed that it is direct mapped
127 * using (vaddr>>vpshift)%tsbsz as the index.
128 *
129 * Note that, for now, the kpm TSB and kernel TSB are the same for
130 * each mapping size.  However that need not always be the case.  If
131 * the trap handlers are updated to search a different TSB for kpm
132 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
133 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
134 *
135 * In:
136 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
137 *    vaddr = virtual address (clobbered)
138 *    tsbp, szc, tmp = scratch
139 * Out:
140 *    tsbp = pointer to entry in TSB
141 */
142#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
143	cmp	vpshift, MMU_PAGESHIFT					;\
144	bne,pn	%icc, 1f		/* branch if large case */	;\
145	  sethi	%hi(kpmsm_tsbsz), szc					;\
146	sethi	%hi(kpmsm_tsbbase), tsbp				;\
147	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
148	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
149	ba,pt	%icc, 2f						;\
150	  nop								;\
1511:	sethi	%hi(kpm_tsbsz), szc					;\
152	sethi	%hi(kpm_tsbbase), tsbp					;\
153	ld	[szc + %lo(kpm_tsbsz)], szc				;\
154	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
1552:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
156
157/*
158 * Lock the TSBE at virtual address tsbep.
159 *
160 * tsbep = TSBE va (ro)
161 * tmp1, tmp2 = scratch registers (clobbered)
162 * label = label to use for branches (text)
163 * %asi = ASI to use for TSB access
164 *
165 * NOTE that we flush the TSB using fast VIS instructions that
166 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
167 * not be treated as a locked entry or we'll get stuck spinning on
168 * an entry that isn't locked but really invalid.
169 */
170
171#if defined(UTSB_PHYS)
172
173#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
174	lda	[tsbep]ASI_MEM, tmp1					;\
175label:									;\
176	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
177	cmp	tmp1, tmp2 						;\
178	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
179	  lda	[tsbep]ASI_MEM, tmp1					;\
180	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
181	cmp	tmp1, tmp2 						;\
182	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
183	  lda	[tsbep]ASI_MEM, tmp1					;\
184	/* tsbe lock acquired */					;\
185	membar #StoreStore
186
187#else /* UTSB_PHYS */
188
189#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
190	lda	[tsbep]%asi, tmp1					;\
191label:									;\
192	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
193	cmp	tmp1, tmp2 						;\
194	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
195	  lda	[tsbep]%asi, tmp1					;\
196	casa	[tsbep]%asi, tmp1, tmp2					;\
197	cmp	tmp1, tmp2 						;\
198	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
199	  lda	[tsbep]%asi, tmp1					;\
200	/* tsbe lock acquired */					;\
201	membar #StoreStore
202
203#endif /* UTSB_PHYS */
204
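/*
 * The lock protocol above amounts to a compare-and-swap spin on the high
 * 32 bits of the TSBE tag.  A minimal C sketch, assuming TSBTAG_LOCKED is
 * a single distinguished 32-bit pattern (the exact value is illustrative):
 *
 *	#include <stdint.h>
 *
 *	#define	SK_TSBTAG_LOCKED	0x40000000u	// assumed lock pattern
 *
 *	// spin until the tag word is swung from its current (unlocked)
 *	// value to the lock pattern; returns the value we displaced
 *	static uint32_t
 *	sk_tsb_lock_entry(volatile uint32_t *tag_hi)
 *	{
 *		for (;;) {
 *			uint32_t old = *tag_hi;
 *			if (old == SK_TSBTAG_LOCKED)
 *				continue;	// already locked: keep spinning
 *			if (__sync_val_compare_and_swap(tag_hi, old,
 *			    SK_TSBTAG_LOCKED) == old)
 *				return (old);	// lock acquired
 *		}
 *	}
 *
 * The membar #StoreStore at the end of the macro orders the locking store
 * ahead of the TSBE stores that follow it.
 */
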
205/*
206 * Atomically write TSBE at virtual address tsbep.
207 *
208 * tsbep = TSBE va (ro)
209 * tte = TSBE TTE (ro)
210 * tagtarget = TSBE tag (ro)
211 * %asi = ASI to use for TSB access
212 */
213
214#if defined(UTSB_PHYS)
215
216#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
217	add	tsbep, TSBE_TTE, tmp1					;\
218	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
219	membar #StoreStore						;\
220	add	tsbep, TSBE_TAG, tmp1					;\
221	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */
222
223#else /* UTSB_PHYS */
224
225#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
226	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
227	membar #StoreStore						;\
228	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */
229
230#endif /* UTSB_PHYS */
231
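/*
 * The ordering requirement above (data first, then tag) can be pictured
 * with this C sketch; the barrier stands in for the membar #StoreStore
 * between the two stores:
 *
 *	#include <stdint.h>
 *
 *	// write the TTE data, then publish it by writing the tag; the tag
 *	// store is also what releases the lock taken by TSB_LOCK_ENTRY
 *	static void
 *	sk_tsb_insert_unlock(volatile uint64_t *tsbe_tte,
 *	    volatile uint64_t *tsbe_tag, uint64_t tte, uint64_t tag)
 *	{
 *		*tsbe_tte = tte;
 *		__sync_synchronize();	// at least a store-store barrier
 *		*tsbe_tag = tag;
 *	}
 */
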
232/*
233 * Load an entry into the TSB at TL > 0.
234 *
235 * tsbep = pointer to the TSBE to load as va (ro)
236 * tte = value of the TTE retrieved and loaded (wo)
237 * tagtarget = tag target register.  To get TSBE tag to load,
238 *   we need to mask off the context and leave only the va (clobbered)
239 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
240 * tmp1, tmp2 = scratch registers
241 * label = label to use for branches (text)
242 * %asi = ASI to use for TSB access
243 */
244
245#if defined(UTSB_PHYS)
246
247#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
248	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
249	/*								;\
250	 * No need to re-check the tte after updating the TSB;		;\
251	 * TSB_INVALIDATE will spin until the entry is unlocked.  Note	;\
252	 * we always invalidate the hash table before we unload the TSB.;\
253	 */								;\
254	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
255	ldxa	[ttepa]ASI_MEM, tte					;\
256	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
257	sethi	%hi(TSBTAG_INVALID), tmp2				;\
258	add	tsbep, TSBE_TAG, tmp1					;\
259	brgez,a,pn tte, label/**/f					;\
260	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
261	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
262label:
263
264#else /* UTSB_PHYS */
265
266#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
267	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
268	/*								;\
269	 * No need to re-check the tte after updating the TSB;		;\
270	 * TSB_INVALIDATE will spin until the entry is unlocked.  Note	;\
271	 * we always invalidate the hash table before we unload the TSB.;\
272	 */								;\
273	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
274	ldxa	[ttepa]ASI_MEM, tte					;\
275	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
276	sethi	%hi(TSBTAG_INVALID), tmp2				;\
277	brgez,a,pn tte, label/**/f					;\
278	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
279	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
280label:
281
282#endif /* UTSB_PHYS */
283
284/*
285 * Load an entry into the TSB at TL=0.
286 *
287 * tsbep = pointer to the TSBE to load as va (ro)
288 * tteva = pointer to the TTE to load as va (ro)
289 * tagtarget = TSBE tag to load (which contains no context), synthesized
290 * to match va of MMU tag target register only (ro)
291 * tmp1, tmp2 = scratch registers (clobbered)
292 * label = label to use for branches (text)
293 * %asi = ASI to use for TSB access
294 */
295
296#if defined(UTSB_PHYS)
297
298#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
299	/* can't rd tteva after locking tsb because it can tlb miss */	;\
300	ldx	[tteva], tteva			/* load tte */		;\
301	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
302	sethi	%hi(TSBTAG_INVALID), tmp2				;\
303	add	tsbep, TSBE_TAG, tmp1					;\
304	brgez,a,pn tteva, label/**/f					;\
305	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
306	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
307label:
308
309#else /* UTSB_PHYS */
310
311#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
312	/* can't rd tteva after locking tsb because it can tlb miss */	;\
313	ldx	[tteva], tteva			/* load tte */		;\
314	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
315	sethi	%hi(TSBTAG_INVALID), tmp2				;\
316	brgez,a,pn tteva, label/**/f					;\
317	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
318	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
319label:
320
321#endif /* UTSB_PHYS */
322
323/*
324 * Invalidate a TSB entry in the TSB.
325 *
326 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
327 *	 about this earlier to ensure this is true.  Thus when we are
328 *	 directly referencing tsbep below, we are referencing the tte_tag
329 *	 field of the TSBE.  If this  offset ever changes, the code below
330 *	 will need to be modified.
331 *
332 * tsbep = pointer to TSBE as va (ro)
333 * tag = invalidation is done if this matches the TSBE tag (ro)
334 * tmp1 - tmp3 = scratch registers (clobbered)
335 * label = label name to use for branches (text)
336 * %asi = ASI to use for TSB access
337 */
338
339#if defined(UTSB_PHYS)
340
341#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
342	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
343	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
344label/**/1:								;\
345	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
346	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
347	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
348	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
349	cmp	tag, tmp3		/* compare tags */		;\
350	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
351	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
352	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
353	cmp	tmp1, tmp3		/* if not successful */		;\
354	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
355	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
356label/**/2:
357
358#else /* UTSB_PHYS */
359
360#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
361	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
362	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
363label/**/1:								;\
364	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
365	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
366	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
367	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
368	cmp	tag, tmp3		/* compare tags */		;\
369	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
370	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
371	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
372	cmp	tmp1, tmp3		/* if not successful */		;\
373	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
374	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
375label/**/2:
376
377#endif /* UTSB_PHYS */
378
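/*
 * TSB_INVALIDATE is a conditional invalidate: it only marks the entry
 * invalid if the full 64-bit tag still matches, and it restarts whenever
 * the entry is locked or the cas loses a race.  A C sketch, with the tag
 * patterns as illustrative assumptions:
 *
 *	#include <stdint.h>
 *
 *	#define	SK_TSBTAG_LOCKED	0x40000000u	// assumed lock pattern
 *	#define	SK_TSBTAG_INVALID	0x80000000u	// assumed invalid pattern
 *
 *	union sk_tsbtag {
 *		volatile uint64_t	tag64;	// whole tag, compared below
 *		volatile uint32_t	hi[2];	// hi[0] = upper word (big endian)
 *	};
 *
 *	static void
 *	sk_tsb_invalidate(union sk_tsbtag *t, uint64_t tag)
 *	{
 *		for (;;) {
 *			uint32_t old = t->hi[0];
 *			if (old == SK_TSBTAG_LOCKED)
 *				continue;	// locked: wait for the holder
 *			if (t->tag64 != tag)
 *				return;		// different entry: leave it alone
 *			if (__sync_val_compare_and_swap(&t->hi[0], old,
 *			    SK_TSBTAG_INVALID) == old)
 *				return;		// entry is now invalid
 *		}
 *	}
 */
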
379#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
380#error	- TSB_SOFTSZ_MASK too small
381#endif
382
383
384/*
385 * An implementation of setx which will be hot patched at run time.
386 * Since it is being hot patched, there is no value passed in.
387 * Thus, essentially we are implementing
388 *	setx value, tmp, dest
389 * where value is RUNTIME_PATCH (aka 0) in this case.
390 */
391#define	RUNTIME_PATCH_SETX(dest, tmp)					\
392	sethi	%hh(RUNTIME_PATCH), tmp					;\
393	sethi	%lm(RUNTIME_PATCH), dest				;\
394	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
395	or	dest, %lo(RUNTIME_PATCH), dest				;\
396	sllx	tmp, 32, tmp						;\
397	nop				/* for perf reasons */		;\
398	or	tmp, dest, dest		/* contents of patched value */
399
400
401#endif /* lint */
402
403
404#if defined (lint)
405
406/*
407 * sfmmu related subroutines
408 */
409
410/*
411 * Use cas; if the tte has changed underneath us, reread and try again.
412 * In the case of a retry, sttep is updated with the new original.
413 */
414/* ARGSUSED */
415int
416sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
417{ return(0); }
418
419/*
420 * Use cas; return -1 if the cas fails, 1 if it succeeds, 0 if no update was needed
421 */
422/* ARGSUSED */
423int
424sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
425{ return(0); }
426
427/* ARGSUSED */
428void
429sfmmu_copytte(tte_t *sttep, tte_t *dttep)
430{}
431
432/*ARGSUSED*/
433struct tsbe *
434sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
435{ return(0); }
436
437/*ARGSUSED*/
438uint64_t
439sfmmu_make_tsbtag(caddr_t va)
440{ return(0); }
441
442#else	/* lint */
443
444	.seg	".data"
445	.global	sfmmu_panic1
446sfmmu_panic1:
447	.asciz	"sfmmu_asm: interrupts already disabled"
448
449	.global	sfmmu_panic3
450sfmmu_panic3:
451	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"
452
453	.global	sfmmu_panic4
454sfmmu_panic4:
455	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"
456
457	.global	sfmmu_panic5
458sfmmu_panic5:
459	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"
460
461
462	ENTRY_NP(sfmmu_modifytte)
463	ldx	[%o2], %g3			/* current */
464	ldx	[%o0], %g1			/* original */
4652:
466	ldx	[%o1], %g2			/* modified */
467	cmp	%g2, %g3			/* is modified = current? */
468	be,a,pt	%xcc,1f				/* yes, don't write */
469	stx	%g3, [%o0]			/* update new original */
470	casx	[%o2], %g1, %g2
471	cmp	%g1, %g2
472	be,pt	%xcc, 1f			/* cas succeeded - return */
473	  nop
474	ldx	[%o2], %g3			/* new current */
475	stx	%g3, [%o0]			/* save as new original */
476	ba,pt	%xcc, 2b
477	  mov	%g3, %g1
4781:	retl
479	membar	#StoreLoad
480	SET_SIZE(sfmmu_modifytte)
481
482	ENTRY_NP(sfmmu_modifytte_try)
483	ldx	[%o1], %g2			/* modified */
484	ldx	[%o2], %g3			/* current */
485	ldx	[%o0], %g1			/* original */
486	cmp	%g3, %g2			/* is modified = current? */
487	be,a,pn %xcc,1f				/* yes, don't write */
488	mov	0, %o1				/* no update was needed. */
489
490	casx	[%o2], %g1, %g2
491	membar	#StoreLoad
492	cmp	%g1, %g2
493	movne	%xcc, -1, %o1			/* cas failed. */
494	move	%xcc, 1, %o1			/* cas succeeded. */
4951:
496	stx	%g2, [%o0]			/* report "current" value */
497	retl
498	mov	%o1, %o0
499	SET_SIZE(sfmmu_modifytte_try)
500
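/*
 * sfmmu_modifytte_try above compresses to the following C sketch: one cas
 * attempt, returning -1 if the cas lost, 1 if it won, and 0 if the
 * destination already held the modified value (sk_tte_t stands in for the
 * 64-bit tte image):
 *
 *	#include <stdint.h>
 *
 *	typedef uint64_t sk_tte_t;
 *
 *	static int
 *	sk_modifytte_try(sk_tte_t *orig, const sk_tte_t *mod,
 *	    volatile sk_tte_t *dst)
 *	{
 *		sk_tte_t oldv = *orig, newv = *mod, cur = *dst;
 *		sk_tte_t prev;
 *
 *		if (cur == newv) {
 *			*orig = cur;		// already up to date, no write
 *			return (0);
 *		}
 *		prev = __sync_val_compare_and_swap(dst, oldv, newv);
 *		*orig = prev;			// report what the cas observed
 *		return ((prev == oldv) ? 1 : -1);
 *	}
 *
 * sfmmu_modifytte above is the looping form of the same operation: on a
 * failed cas it reloads the current value, stores it back through sttep,
 * and retries until the cas succeeds or no write is needed.
 */
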
501	ENTRY_NP(sfmmu_copytte)
502	ldx	[%o0], %g1
503	retl
504	stx	%g1, [%o1]
505	SET_SIZE(sfmmu_copytte)
506
507
508	/*
509	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
510	 * %o0 = TSB base address (in), pointer to TSB entry (out)
511	 * %o1 = vaddr (in)
512	 * %o2 = vpshift (in)
513	 * %o3 = tsb size code (in)
514	 * %o4 = scratch register
515	 */
516	ENTRY_NP(sfmmu_get_tsbe)
517	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
518	retl
519	nop
520	SET_SIZE(sfmmu_get_tsbe)
521
522	/*
523	 * Return a TSB tag for the given va.
524	 * %o0 = va (in/clobbered)
525	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
526	 */
527	ENTRY_NP(sfmmu_make_tsbtag)
528	retl
529	srln	%o0, TTARGET_VA_SHIFT, %o0
530	SET_SIZE(sfmmu_make_tsbtag)
531
532#endif /* lint */
533
534/*
535 * Other sfmmu primitives
536 */
537
538
539#if defined (lint)
540void
541sfmmu_patch_ktsb(void)
542{
543}
544
545void
546sfmmu_kpm_patch_tlbm(void)
547{
548}
549
550void
551sfmmu_kpm_patch_tsbm(void)
552{
553}
554
555/* ARGSUSED */
556void
557sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
558{
559}
560
561/* ARGSUSED */
562void
563sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
564{
565}
566
567/* ARGSUSED */
568void
569sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
570{
571}
572
573/* ARGSUSED */
574void
575sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
576{
577}
578
579#else /* lint */
580
581#define	I_SIZE		4
582
583	ENTRY_NP(sfmmu_fix_ktlb_traptable)
584	/*
585	 * %o0 = start of patch area
586	 * %o1 = size code of TSB to patch
587	 * %o3 = scratch
588	 */
589	/* fix sll */
590	ld	[%o0], %o3			/* get sll */
591	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
592	st	%o3, [%o0]			/* write sll */
593	flush	%o0
594	/* fix srl */
595	add	%o0, I_SIZE, %o0		/* goto next instr. */
596	ld	[%o0], %o3			/* get srl */
597	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
598	st	%o3, [%o0]			/* write srl */
599	retl
600	flush	%o0
601	SET_SIZE(sfmmu_fix_ktlb_traptable)
602
603	ENTRY_NP(sfmmu_fixup_ktsbbase)
604	/*
605	 * %o0 = start of patch area
606	 * %o5 = kernel virtual or physical tsb base address
607	 * %o2, %o3 are used as scratch registers.
608	 */
609	/* fixup sethi instruction */
610	ld	[%o0], %o3
611	srl	%o5, 10, %o2			! offset is bits 31:10
612	or	%o3, %o2, %o3			! set imm22
613	st	%o3, [%o0]
614	/* fixup offset of lduw/ldx */
615	add	%o0, I_SIZE, %o0		! next instr
616	ld	[%o0], %o3
617	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
618	or	%o3, %o2, %o3
619	st	%o3, [%o0]
620	retl
621	flush	%o0
622	SET_SIZE(sfmmu_fixup_ktsbbase)
623
624	ENTRY_NP(sfmmu_fixup_setx)
625	/*
626	 * %o0 = start of patch area
627	 * %o4 = 64 bit value to patch
628	 * %o2, %o3 are used as scratch registers.
629	 *
630	 * Note: Assuming that all parts of the instructions which need to be
631	 *	 patched correspond to RUNTIME_PATCH (aka 0)
632	 *
633	 * Note the implementation of setx which is being patched is as follows:
634	 *
635	 * sethi   %hh(RUNTIME_PATCH), tmp
636	 * sethi   %lm(RUNTIME_PATCH), dest
637	 * or      tmp, %hm(RUNTIME_PATCH), tmp
638	 * or      dest, %lo(RUNTIME_PATCH), dest
639	 * sllx    tmp, 32, tmp
640	 * nop
641	 * or      tmp, dest, dest
642	 *
643	 * which differs from the implementation in the
644	 * "SPARC Architecture Manual"
645	 */
646	/* fixup sethi instruction */
647	ld	[%o0], %o3
648	srlx	%o4, 42, %o2			! bits [63:42]
649	or	%o3, %o2, %o3			! set imm22
650	st	%o3, [%o0]
651	/* fixup sethi instruction */
652	add	%o0, I_SIZE, %o0		! next instr
653	ld	[%o0], %o3
654	sllx	%o4, 32, %o2			! clear upper bits
655	srlx	%o2, 42, %o2			! bits [31:10]
656	or	%o3, %o2, %o3			! set imm22
657	st	%o3, [%o0]
658	/* fixup or instruction */
659	add	%o0, I_SIZE, %o0		! next instr
660	ld	[%o0], %o3
661	srlx	%o4, 32, %o2			! bits [63:32]
662	and	%o2, 0x3ff, %o2			! bits [41:32]
663	or	%o3, %o2, %o3			! set imm
664	st	%o3, [%o0]
665	/* fixup or instruction */
666	add	%o0, I_SIZE, %o0		! next instr
667	ld	[%o0], %o3
668	and	%o4, 0x3ff, %o2			! bits [9:0]
669	or	%o3, %o2, %o3			! set imm
670	st	%o3, [%o0]
671	retl
672	flush	%o0
673	SET_SIZE(sfmmu_fixup_setx)
674
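/*
 * The bit slicing done by sfmmu_fixup_setx can be checked with this C
 * sketch, which splits a 64-bit value into the four immediates the patched
 * sequence consumes and then recombines them:
 *
 *	#include <stdint.h>
 *
 *	static void
 *	sk_setx_fields(uint64_t v, uint32_t *hh22, uint32_t *lm22,
 *	    uint32_t *hm10, uint32_t *lo10)
 *	{
 *		*hh22 = (uint32_t)(v >> 42);		// sethi imm22: bits 63:42
 *		*lm22 = (uint32_t)((v << 32) >> 42);	// sethi imm22: bits 31:10
 *		*hm10 = (uint32_t)((v >> 32) & 0x3ff);	// or imm13: bits 41:32
 *		*lo10 = (uint32_t)(v & 0x3ff);		// or imm13: bits 9:0
 *	}
 *
 *	static uint64_t
 *	sk_setx_recombine(uint32_t hh22, uint32_t lm22, uint32_t hm10,
 *	    uint32_t lo10)
 *	{
 *		uint64_t hi = ((uint64_t)hh22 << 10) | hm10;	// tmp after sethi+or
 *		uint64_t lo = ((uint64_t)lm22 << 10) | lo10;	// dest after sethi+or
 *		return ((hi << 32) | lo);			// sllx tmp, 32; or
 *	}
 *
 * Recombining the four fields reproduces v for any 64-bit value, which is
 * what the patched RUNTIME_PATCH_SETX sequence relies on.
 */
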
675	ENTRY_NP(sfmmu_fixup_or)
676	/*
677	 * %o0 = start of patch area
678	 * %o4 = 32 bit value to patch
679	 * %o2, %o3 are used as scratch registers.
680	 * Note: Assuming that all parts of the instructions which need to be
681	 *	 patched correspond to RUNTIME_PATCH (aka 0)
682	 */
683	ld	[%o0], %o3
684	and	%o4, 0x3ff, %o2			! bits [9:0]
685	or	%o3, %o2, %o3			! set imm
686	st	%o3, [%o0]
687	retl
688	flush	%o0
689	SET_SIZE(sfmmu_fixup_or)
690
691	ENTRY_NP(sfmmu_fixup_shiftx)
692	/*
693	 * %o0 = start of patch area
694	 * %o4 = signed int immediate value to add to sllx/srlx imm field
695	 * %o2, %o3 are used as scratch registers.
696	 *
697	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
698	 * so we do a simple add.  The caller must be careful to prevent
699	 * overflow, which could easily occur if the initial value is nonzero!
700	 */
701	ld	[%o0], %o3			! %o3 = instruction to patch
702	and	%o3, 0x3f, %o2			! %o2 = existing imm value
703	add	%o2, %o4, %o2			! %o2 = new imm value
704	andn	%o3, 0x3f, %o3			! clear old imm value
705	and	%o2, 0x3f, %o2			! truncate new imm value
706	or	%o3, %o2, %o3			! set new imm value
707	st	%o3, [%o0]			! store updated instruction
708	retl
709	flush	%o0
710	SET_SIZE(sfmmu_fixup_shiftx)
711
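/*
 * A C sketch of the instruction rewrite sfmmu_fixup_shiftx performs: the
 * shift count of an sllx/srlx occupies the low 6 bits of the instruction
 * word, so patching is a masked add:
 *
 *	#include <stdint.h>
 *
 *	static uint32_t
 *	sk_fixup_shiftx(uint32_t instr, int delta)
 *	{
 *		uint32_t shcnt = (instr & 0x3f) + (uint32_t)delta;
 *		return ((instr & ~0x3fu) | (shcnt & 0x3f));	// new shift count
 *	}
 *
 * As the comment above notes, the caller is responsible for keeping the
 * result within the 6-bit range.
 */
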
712	ENTRY_NP(sfmmu_fixup_mmu_asi)
713	/*
714	 * Patch imm_asi of all ldda instructions in the MMU
715	 * trap handlers.  We search MMU_PATCH_INSTR instructions
716	 * starting from the itlb miss handler (trap 0x64).
717	 * %o0 = address of tt[0,1]_itlbmiss
718	 * %o1 = imm_asi to setup, shifted by appropriate offset.
719	 * %o3 = number of instructions to search
720	 * %o4 = reserved by caller: called from leaf routine
721	 */
7221:	ldsw	[%o0], %o2			! load instruction to %o2
723	brgez,pt %o2, 2f
724	  srl	%o2, 30, %o5
725	btst	1, %o5				! test bit 30; skip if not set
726	bz,pt	%icc, 2f
727	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
728	srlx	%o5, 58, %o5			! isolate op3 part of opcode
729	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
730	brnz,pt	%o5, 2f				! skip if not a match
731	  or	%o2, %o1, %o2			! or in imm_asi
732	st	%o2, [%o0]			! write patched instruction
7332:	dec	%o3
734	brnz,a,pt %o3, 1b			! loop until we're done
735	  add	%o0, I_SIZE, %o0
736	retl
737	flush	%o0
738	SET_SIZE(sfmmu_fixup_mmu_asi)
739
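/*
 * The opcode test in sfmmu_fixup_mmu_asi corresponds to this C sketch: a
 * SPARC ldda has op (bits 31:30) == 3 and op3 (bits 24:19) == 0x13, and
 * the immediate ASI field occupies bits 12:5 of the instruction word:
 *
 *	#include <stdint.h>
 *
 *	static int
 *	sk_is_ldda(uint32_t instr)
 *	{
 *		return ((instr >> 30) == 3 && ((instr >> 19) & 0x3f) == 0x13);
 *	}
 *
 *	// asi_field is the ASI already shifted into place (asi << 5),
 *	// matching the pre-shifted value the routine is handed in %o1
 *	static uint32_t
 *	sk_patch_imm_asi(uint32_t instr, uint32_t asi_field)
 *	{
 *		return (instr | asi_field);
 *	}
 */
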
740	/*
741	 * Patch immediate ASI used to access the TSB in the
742	 * trap table.
743	 * inputs: %o0 = value of ktsb_phys
744	 */
745	ENTRY_NP(sfmmu_patch_mmu_asi)
746	mov	%o7, %o4			! save return pc in %o4
747	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
748	movrz	%o0, ASI_NQUAD_LD, %o3
749	sll	%o3, 5, %o1			! imm_asi offset
750	mov	6, %o3				! number of instructions
751	sethi	%hi(dktsb), %o0			! to search
752	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
753	  or	%o0, %lo(dktsb), %o0
754	mov	6, %o3				! number of instructions
755	sethi	%hi(dktsb4m), %o0		! to search
756	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
757	  or	%o0, %lo(dktsb4m), %o0
758	mov	6, %o3				! number of instructions
759	sethi	%hi(iktsb), %o0			! to search
760	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
761	  or	%o0, %lo(iktsb), %o0
762	mov	%o4, %o7			! restore return pc -- leaf
763	retl
764	nop
765	SET_SIZE(sfmmu_patch_mmu_asi)
766
767	ENTRY_NP(sfmmu_patch_ktsb)
768	/*
769	 * We need to fix iktsb, dktsb, et al.
770	 */
771	save	%sp, -SA(MINFRAME), %sp
772	set	ktsb_phys, %o1
773	ld	[%o1], %o4
774	set	ktsb_base, %o5
775	set	ktsb4m_base, %l1
776	brz,pt	%o4, 1f
777	  nop
778	set	ktsb_pbase, %o5
779	set	ktsb4m_pbase, %l1
7801:
781	sethi	%hi(ktsb_szcode), %o1
782	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */
783
784	sethi	%hi(iktsb), %o0
785	call	sfmmu_fix_ktlb_traptable
786	  or	%o0, %lo(iktsb), %o0
787
788	sethi	%hi(dktsb), %o0
789	call	sfmmu_fix_ktlb_traptable
790	  or	%o0, %lo(dktsb), %o0
791
792	sethi	%hi(ktsb4m_szcode), %o1
793	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */
794
795	sethi	%hi(dktsb4m), %o0
796	call	sfmmu_fix_ktlb_traptable
797	  or	%o0, %lo(dktsb4m), %o0
798
799#ifndef sun4v
800	mov	ASI_N, %o2
801	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
802	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
803	sethi	%hi(tsb_kernel_patch_asi), %o0
804	call	sfmmu_fixup_or
805	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
806#endif
807
808	ldx 	[%o5], %o4		! load ktsb base addr (VA or PA)
809
810	sethi	%hi(dktsbbase), %o0
811	call	sfmmu_fixup_setx	! patch value of ktsb base addr
812	  or	%o0, %lo(dktsbbase), %o0
813
814	sethi	%hi(iktsbbase), %o0
815	call	sfmmu_fixup_setx	! patch value of ktsb base addr
816	  or	%o0, %lo(iktsbbase), %o0
817
818	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
819	call	sfmmu_fixup_setx	! patch value of ktsb base addr
820	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0
821
822#ifdef sun4v
823	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
824	call	sfmmu_fixup_setx	! patch value of ktsb base addr
825	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
826#endif /* sun4v */
827
828	ldx 	[%l1], %o4		! load ktsb4m base addr (VA or PA)
829
830	sethi	%hi(dktsb4mbase), %o0
831	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
832	  or	%o0, %lo(dktsb4mbase), %o0
833
834	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
835	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
836	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0
837
838#ifdef sun4v
839	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
840	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
841	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
842#endif /* sun4v */
843
844	set	ktsb_szcode, %o4
845	ld	[%o4], %o4
846	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
847	call	sfmmu_fixup_or		! patch value of ktsb_szcode
848	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0
849
850#ifdef sun4v
851	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
852	call	sfmmu_fixup_or		! patch value of ktsb_szcode
853	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
854#endif /* sun4v */
855
856	set	ktsb4m_szcode, %o4
857	ld	[%o4], %o4
858	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
859	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
860	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0
861
862#ifdef sun4v
863	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
864	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
865	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
866#endif /* sun4v */
867
868	ret
869	restore
870	SET_SIZE(sfmmu_patch_ktsb)
871
872	ENTRY_NP(sfmmu_kpm_patch_tlbm)
873	/*
874	 * Fixup trap handlers in common segkpm case.  This is reserved
875	 * for future use should kpm TSB be changed to be other than the
876	 * kernel TSB.
877	 */
878	retl
879	nop
880	SET_SIZE(sfmmu_kpm_patch_tlbm)
881
882	ENTRY_NP(sfmmu_kpm_patch_tsbm)
883	/*
884	 * nop the branch to sfmmu_kpm_dtsb_miss_small
885	 * in the case where we are using large pages for
886	 * seg_kpm (and hence must probe the second TSB for
887	 * seg_kpm VAs)
888	 */
889	set	dktsb4m_kpmcheck_small, %o0
890	MAKE_NOP_INSTR(%o1)
891	st	%o1, [%o0]
892	flush	%o0
893	retl
894	nop
895	SET_SIZE(sfmmu_kpm_patch_tsbm)
896
897	ENTRY_NP(sfmmu_patch_utsb)
898#ifdef sun4v
899	retl
900	nop
901#else /* sun4v */
902	/*
903	 * We need to hot patch utsb_vabase and utsb4m_vabase
904	 */
905	save	%sp, -SA(MINFRAME), %sp
906
907	/* patch value of utsb_vabase */
908	set	utsb_vabase, %o1
909	ldx	[%o1], %o4
910	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
911	call	sfmmu_fixup_setx
912	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
913	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
914	call	sfmmu_fixup_setx
915	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
916	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
917	call	sfmmu_fixup_setx
918	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
919
920	/* patch value of utsb4m_vabase */
921	set	utsb4m_vabase, %o1
922	ldx	[%o1], %o4
923	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
924	call	sfmmu_fixup_setx
925	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
926	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
927	call	sfmmu_fixup_setx
928	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
929	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
930	call	sfmmu_fixup_setx
931	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0
932
933	/*
934	 * Patch TSB base register masks and shifts if needed.
935	 * By default the TSB base register contents are set up for 4M slab.
936	 * If we're using a smaller slab size and reserved VA range we need
937	 * to patch up those values here.
938	 */
939	set	tsb_slab_shift, %o1
940	set	MMU_PAGESHIFT4M, %o4
941	ldsw	[%o1], %o3
942	subcc	%o4, %o3, %o4
943	bz,pt	%icc, 1f
944	  /* delay slot safe */
945
946	/* patch reserved VA range size if needed. */
947	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
948	call	sfmmu_fixup_shiftx
949	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
950	call	sfmmu_fixup_shiftx
951	  add	%o0, I_SIZE, %o0
952	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
953	call	sfmmu_fixup_shiftx
954	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
955	call	sfmmu_fixup_shiftx
956	  add	%o0, I_SIZE, %o0
9571:
958	/* patch TSBREG_VAMASK used to set up TSB base register */
959	set	tsb_slab_mask, %o1
960	lduw	[%o1], %o4
961	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
962	call	sfmmu_fixup_or
963	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
964	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
965	call	sfmmu_fixup_or
966	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0
967
968	ret
969	restore
970#endif /* sun4v */
971	SET_SIZE(sfmmu_patch_utsb)
972
973
974	/*
975	 * Routine that loads an entry into a tsb using virtual addresses.
976	 * Locking is required since all cpus can use the same TSB.
977	 * Note that it is no longer required to have a valid context
978	 * when calling this function.
979	 */
980	ENTRY_NP(sfmmu_load_tsbe)
981	/*
982	 * %o0 = pointer to tsbe to load
983	 * %o1 = tsb tag
984	 * %o2 = virtual pointer to TTE
985	 * %o3 = 1 if physical address in %o0 else 0
986	 */
987	rdpr	%pstate, %o5
988#ifdef DEBUG
989	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
990	bnz,pt 	%icc, 1f			/* disabled, panic	 */
991	  nop
992
993	sethi	%hi(panicstr), %g1
994	ldx	[%g1 + %lo(panicstr)], %g1
995	tst	%g1
996	bnz,pt	%icc, 1f
997	  nop
998
999	save	%sp, -SA(MINFRAME), %sp
1000	sethi	%hi(sfmmu_panic1), %o0
1001	call	panic
1002	 or	%o0, %lo(sfmmu_panic1), %o0
10031:
1004#endif /* DEBUG */
1005
1006	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
1007
1008	SETUP_TSB_ASI(%o3, %g3)
1009	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, 1)
1010
1011	wrpr	%g0, %o5, %pstate		/* enable interrupts */
1012
1013	retl
1014	membar	#StoreStore|#StoreLoad
1015	SET_SIZE(sfmmu_load_tsbe)
1016
1017	/*
1018	 * Flush TSB of a given entry if the tag matches.
1019	 */
1020	ENTRY(sfmmu_unload_tsbe)
1021	/*
1022	 * %o0 = pointer to tsbe to be flushed
1023	 * %o1 = tag to match
1024	 * %o2 = 1 if physical address in %o0 else 0
1025	 */
1026	SETUP_TSB_ASI(%o2, %g1)
1027	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
1028	retl
1029	membar	#StoreStore|#StoreLoad
1030	SET_SIZE(sfmmu_unload_tsbe)
1031
1032	/*
1033	 * Routine that loads a TTE into the kpm TSB from C code.
1034	 * Locking is required since kpm TSB is shared among all CPUs.
1035	 */
1036	ENTRY_NP(sfmmu_kpm_load_tsb)
1037	/*
1038	 * %o0 = vaddr
1039	 * %o1 = ttep
1040	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
1041	 */
1042	rdpr	%pstate, %o5			! %o5 = saved pstate
1043#ifdef DEBUG
1044	andcc	%o5, PSTATE_IE, %g0		! if interrupts already
1045	bnz,pt	%icc, 1f			! disabled, panic
1046	  nop
1047
1048	sethi	%hi(panicstr), %g1
1049	ldx	[%g1 + %lo(panicstr)], %g1
1050	tst	%g1
1051	bnz,pt	%icc, 1f
1052	  nop
1053
1054	save	%sp, -SA(MINFRAME), %sp
1055	sethi	%hi(sfmmu_panic1), %o0
1056	call	panic
1057	  or	%o0, %lo(sfmmu_panic1), %o0
10581:
1059#endif /* DEBUG */
1060	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts
1061
1062#ifndef sun4v
1063	sethi	%hi(ktsb_phys), %o4
1064	mov	ASI_N, %o3
1065	ld	[%o4 + %lo(ktsb_phys)], %o4
1066	movrnz	%o4, ASI_MEM, %o3
1067	mov	%o3, %asi
1068#endif
1069	mov	%o0, %g1			! %g1 = vaddr
1070
1071	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1072	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
1073	/* %g2 = tsbep, %g1 clobbered */
1074
1075	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
1076	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
1077	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, 1)
1078
1079	wrpr	%g0, %o5, %pstate		! enable interrupts
1080	retl
1081	  membar #StoreStore|#StoreLoad
1082	SET_SIZE(sfmmu_kpm_load_tsb)
1083
1084	/*
1085	 * Routine that shoots down a TTE in the kpm TSB or in the
1086	 * kernel TSB depending on virtpg. Locking is required since
1087	 * kpm/kernel TSB is shared among all CPUs.
1088	 */
1089	ENTRY_NP(sfmmu_kpm_unload_tsb)
1090	/*
1091	 * %o0 = vaddr
1092	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
1093	 */
1094#ifndef sun4v
1095	sethi	%hi(ktsb_phys), %o4
1096	mov	ASI_N, %o3
1097	ld	[%o4 + %lo(ktsb_phys)], %o4
1098	movrnz	%o4, ASI_MEM, %o3
1099	mov	%o3, %asi
1100#endif
1101	mov	%o0, %g1			! %g1 = vaddr
1102
1103	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1104	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
1105	/* %g2 = tsbep, %g1 clobbered */
1106
1107	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
1108	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
1109	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)
1110
1111	retl
1112	  membar	#StoreStore|#StoreLoad
1113	SET_SIZE(sfmmu_kpm_unload_tsb)
1114
1115#endif /* lint */
1116
1117
1118#if defined (lint)
1119
1120/*ARGSUSED*/
1121pfn_t
1122sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
1123{ return(0); }
1124
1125#else /* lint */
1126
1127	ENTRY_NP(sfmmu_ttetopfn)
1128	ldx	[%o0], %g1			/* read tte */
1129	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
1130	/*
1131	 * g1 = pfn
1132	 */
1133	retl
1134	mov	%g1, %o0
1135	SET_SIZE(sfmmu_ttetopfn)
1136
1137#endif /* !lint */
1138
1139
1140#if defined (lint)
1141/*
1142 * sfmmu_hblk_hash_add is the assembly primitive for adding hmeblks to
1143 * the hash list.
1144 */
1145/* ARGSUSED */
1146void
1147sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
1148	uint64_t hblkpa)
1149{
1150}
1151
1152/*
1153 * sfmmu_hblk_hash_rm is the assembly primitive for removing hmeblks from the
1154 * hash list.
1155 */
1156/* ARGSUSED */
1157void
1158sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
1159	uint64_t hblkpa, struct hme_blk *prev_hblkp)
1160{
1161}
1162#else /* lint */
1163
1164/*
1165 * Functions to grab/release the hme bucket list lock.  I only use a byte
1166 * instead of the whole int because eventually we might want to
1167 * put some counters on the other bytes (of course, these routines would
1168 * have to change).  The code that grabs this lock should execute
1169 * with interrupts disabled and hold the lock for the least amount of time
1170 * possible.
1171 */
1172
1173/*
1174 * Even though hmeh_listlock is updated using pa there's no need to flush
1175 * dcache since hmeh_listlock will be restored to the original value (0)
1176 * before interrupts are reenabled.
1177 */
1178
1179/*
1180 * For sparcv9, hme hash buckets may not be in the nucleus.  The hme hash update
1181 * routines still use virtual addresses to update the bucket fields. But they
1182 * must not cause a TLB miss after grabbing the low level bucket lock. To
1183 * achieve this we must make sure the bucket structure is completely within an
1184 * 8K page.
1185 */
1186
1187#if (HMEBUCK_SIZE & (HMEBUCK_SIZE - 1))
1188#error - the size of hmehash_bucket structure is not a power of 2
1189#endif
1190
1191#define HMELOCK_ENTER(hmebp, tmp1, tmp2, label1, asi)           \
1192	mov     0xff, tmp2                                      ;\
1193	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
1194label1:                                                         ;\
1195	casa    [tmp1]asi, %g0, tmp2                            ;\
1196	brnz,pn tmp2, label1                                    ;\
1197	mov     0xff, tmp2                                      ;\
1198	membar  #LoadLoad
1199
1200#define HMELOCK_EXIT(hmebp, tmp1, asi)                          \
1201	membar  #LoadStore|#StoreStore                          ;\
1202	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
1203	sta     %g0, [tmp1]asi
1204
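/*
 * HMELOCK_ENTER/HMELOCK_EXIT form a simple spin lock on the bucket's lock
 * word: swing it from 0 to 0xff with cas, release it by storing 0.  A C
 * sketch, with the barriers standing in for the membars above:
 *
 *	#include <stdint.h>
 *
 *	static void
 *	sk_hmelock_enter(volatile uint32_t *lockp)
 *	{
 *		while (__sync_val_compare_and_swap(lockp, 0, 0xff) != 0)
 *			;			// held by someone else: retry
 *		__sync_synchronize();		// membar #LoadLoad
 *	}
 *
 *	static void
 *	sk_hmelock_exit(volatile uint32_t *lockp)
 *	{
 *		__sync_synchronize();		// membar #LoadStore|#StoreStore
 *		*lockp = 0;
 *	}
 */
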
1205	.seg	".data"
1206hblk_add_panic1:
1207	.ascii	"sfmmu_hblk_hash_add: interrupts disabled"
1208	.byte	0
1209hblk_add_panic2:
1210	.ascii	"sfmmu_hblk_hash_add: va hmeblkp is NULL but pa is not"
1211	.byte	0
1212	.align	4
1213	.seg	".text"
1214
1215	ENTRY_NP(sfmmu_hblk_hash_add)
1216	/*
1217	 * %o0 = hmebp
1218	 * %o1 = hmeblkp
1219	 * %o2 = hblkpa
1220	 */
1221	rdpr	%pstate, %o5
1222#ifdef DEBUG
1223	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
1224	bnz,pt %icc, 3f				/* disabled, panic	 */
1225	  nop
1226	save	%sp, -SA(MINFRAME), %sp
1227	sethi	%hi(hblk_add_panic1), %o0
1228	call	panic
1229	 or	%o0, %lo(hblk_add_panic1), %o0
1230	ret
1231	restore
1232
12333:
1234#endif /* DEBUG */
1235	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
1236	mov	%o2, %g1
1237
1238	/*
1239	 * g1 = hblkpa
1240	 */
1241	ldn	[%o0 + HMEBUCK_HBLK], %o4	/* next hmeblk */
1242	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = next hblkpa */
1243#ifdef	DEBUG
1244	cmp	%o4, %g0
1245	bne,pt %xcc, 1f
1246	 nop
1247	brz,pt %g2, 1f
1248	 nop
1249	wrpr	%g0, %o5, %pstate		/* enable interrupts */
1250	save	%sp, -SA(MINFRAME), %sp
1251	sethi	%hi(hblk_add_panic2), %o0
1252	call	panic
1253	  or	%o0, %lo(hblk_add_panic2), %o0
1254	ret
1255	restore
12561:
1257#endif /* DEBUG */
1258	/*
1259	 * We update the hmeblk's entries before grabbing the lock because the stores
1260	 * could take a tlb miss and require the hash lock.  The buckets
1261	 * are part of the nucleus so we are cool with those stores.
1262	 *
1263	 * if buckets are not part of the nucleus our game is to
1264	 * not touch any other page via va until we drop the lock.
1265	 * This guarantees we won't get a tlb miss before the lock release
1266	 * since interrupts are disabled.
1267	 */
1268	stn	%o4, [%o1 + HMEBLK_NEXT]	/* update hmeblk's next */
1269	stx	%g2, [%o1 + HMEBLK_NEXTPA]	/* update hmeblk's next pa */
1270	HMELOCK_ENTER(%o0, %o2, %o3, hashadd1, ASI_N)
1271	stn	%o1, [%o0 + HMEBUCK_HBLK]	/* update bucket hblk next */
1272	stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* add hmeblk to list */
1273	HMELOCK_EXIT(%o0, %g2, ASI_N)
1274	retl
1275	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1276	SET_SIZE(sfmmu_hblk_hash_add)
1277
1278	ENTRY_NP(sfmmu_hblk_hash_rm)
1279	/*
1280	 * This function removes an hmeblk from the hash chain.
1281	 * It is written to guarantee we don't take a tlb miss
1282	 * by using physical addresses to update the list.
1283	 *
1284	 * %o0 = hmebp
1285	 * %o1 = hmeblkp
1286	 * %o2 = hmeblkp previous pa
1287	 * %o3 = hmeblkp previous
1288	 */
1289
1290	mov	%o3, %o4			/* o4 = hmeblkp previous */
1291
1292	rdpr	%pstate, %o5
1293#ifdef DEBUG
1294	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
1295	bnz,pt 	%icc, 3f			/* disabled, panic	 */
1296	  nop
1297
1298	sethi	%hi(panicstr), %g1
1299	ldx	[%g1 + %lo(panicstr)], %g1
1300	tst	%g1
1301	bnz,pt	%icc, 3f
1302	  nop
1303
1304	sethi	%hi(sfmmu_panic1), %o0
1305	call	panic
1306	 or	%o0, %lo(sfmmu_panic1), %o0
13073:
1308#endif /* DEBUG */
1309	/*
1310	 * disable interrupts, clear Address Mask to access 64 bit physaddr
1311	 */
1312	andn    %o5, PSTATE_IE, %g1
1313	wrpr    %g1, 0, %pstate
1314
1315#ifndef sun4v
1316	sethi   %hi(dcache_line_mask), %g4
1317	ld      [%g4 + %lo(dcache_line_mask)], %g4
1318#endif /* sun4v */
1319
1320	/*
1321	 * if buckets are not part of the nucleus our game is to
1322	 * not touch any other page via va until we drop the lock.
1323	 * This guarantees we won't get a tlb miss before the lock release
1324	 * since interrupts are disabled.
1325	 */
1326	HMELOCK_ENTER(%o0, %g1, %g3, hashrm1, ASI_N)
1327	ldn	[%o0 + HMEBUCK_HBLK], %g2	/* first hmeblk in list */
1328	cmp	%g2, %o1
1329	bne,pt	%ncc,1f
1330	 mov	ASI_MEM, %asi
1331	/*
1332	 * hmeblk is first on list
1333	 */
1334	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = hmeblk pa */
1335	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
1336	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
1337	stn	%o3, [%o0 + HMEBUCK_HBLK]	/* write va */
1338	ba,pt	%xcc, 2f
1339	  stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* write pa */
13401:
1341	/* hmeblk is not first on list */
1342
1343	mov	%o2, %g3
1344#ifndef sun4v
1345	GET_CPU_IMPL(%g2)
1346	cmp %g2, CHEETAH_IMPL
1347	bge %icc, hblk_hash_rm_1
1348	and	%o4, %g4, %g2
1349	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev pa from dcache */
1350	add	%o4, HMEBLK_NEXT, %o4
1351	and	%o4, %g4, %g2
1352	ba	hblk_hash_rm_2
1353	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev va from dcache */
1354hblk_hash_rm_1:
1355
1356	stxa	%g0, [%g3]ASI_DC_INVAL		/* flush prev pa from dcache */
1357	membar	#Sync
1358	add     %g3, HMEBLK_NEXT, %g2
1359	stxa	%g0, [%g2]ASI_DC_INVAL		/* flush prev va from dcache */
1360hblk_hash_rm_2:
1361	membar	#Sync
1362#endif /* sun4v */
1363	ldxa	[%g3 + HMEBLK_NEXTPA] %asi, %g2	/* g2 = hmeblk pa */
1364	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
1365	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
1366	stna	%o3, [%g3 + HMEBLK_NEXT] %asi	/* write va */
1367	stxa	%g1, [%g3 + HMEBLK_NEXTPA] %asi	/* write pa */
13682:
1369	HMELOCK_EXIT(%o0, %g2, ASI_N)
1370	retl
1371	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1372	SET_SIZE(sfmmu_hblk_hash_rm)
1373
1374#endif /* lint */
1375
1376/*
1377 * These macros are used to update global sfmmu hme hash statistics
1378 * in perf critical paths. It is only enabled in debug kernels or
1379 * if SFMMU_STAT_GATHER is defined
1380 */
1381#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1382#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1383	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1384	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
1385	cmp	tmp1, hatid						;\
1386	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
1387	set	sfmmu_global_stat, tmp1					;\
1388	add	tmp1, tmp2, tmp1					;\
1389	ld	[tmp1], tmp2						;\
1390	inc	tmp2							;\
1391	st	tmp2, [tmp1]
1392
1393#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1394	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1395	mov	HATSTAT_KHASH_LINKS, tmp2				;\
1396	cmp	tmp1, hatid						;\
1397	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
1398	set	sfmmu_global_stat, tmp1					;\
1399	add	tmp1, tmp2, tmp1					;\
1400	ld	[tmp1], tmp2						;\
1401	inc	tmp2							;\
1402	st	tmp2, [tmp1]
1403
1404
1405#else /* DEBUG || SFMMU_STAT_GATHER */
1406
1407#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1408
1409#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1410
1411#endif  /* DEBUG || SFMMU_STAT_GATHER */
1412
1413/*
1414 * This macro is used to update global sfmmu kstats in non-perf-critical
1415 * areas, so it is enabled all the time.
1416 */
1417#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
1418	sethi	%hi(sfmmu_global_stat), tmp1				;\
1419	add	tmp1, statname, tmp1					;\
1420	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
1421	inc	tmp2							;\
1422	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]
1423
1424/*
1425 * This macro is used to update per-cpu stats in non-perf-critical
1426 * areas, so it is enabled all the time.
1427 */
1428#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
1429	ld	[tsbarea + stat], tmp1					;\
1430	inc	tmp1							;\
1431	st	tmp1, [tsbarea + stat]
1432
1433/*
1434 * This macro is used to update per-cpu stats in non-perf-critical
1435 * areas, so it is enabled all the time.
1436 */
1437#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
1438	lduh	[tsbarea + stat], tmp1					;\
1439	inc	tmp1							;\
1440	stuh	tmp1, [tsbarea + stat]
1441
1442#if defined(KPM_TLBMISS_STATS_GATHER)
1443	/*
1444	 * Count kpm dtlb misses separately to allow a different
1445	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
1446	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
1447	 */
1448#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
1449	brgez	tagacc, label	/* KPM VA? */				;\
1450	nop								;\
1451	CPU_INDEX(tmp1, tsbma)						;\
1452	sethi	%hi(kpmtsbm_area), tsbma				;\
1453	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
1454	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
1455	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
1456	/* VA range check */						;\
1457	ldx	[tsbma + KPMTSBM_VBASE], val				;\
1458	cmp	tagacc, val						;\
1459	blu,pn	%xcc, label						;\
1460	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
1461	cmp	tagacc, tmp1						;\
1462	bgeu,pn	%xcc, label						;\
1463	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
1464	inc	val							;\
1465	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
1466label:
1467#else
1468#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1469#endif	/* KPM_TLBMISS_STATS_GATHER */
1470
1471#if defined (lint)
1472/*
1473 * The following routines are jumped to from the mmu trap handlers to do
1474 * the setup for calling systrap.  They are separate routines instead of
1475 * being part of the handlers because the handlers would exceed 32
1476 * instructions, and since this is part of the slow path the jump
1477 * cost is irrelevant.
1478 */
1479void
1480sfmmu_pagefault(void)
1481{
1482}
1483
1484void
1485sfmmu_mmu_trap(void)
1486{
1487}
1488
1489void
1490sfmmu_window_trap(void)
1491{
1492}
1493
1494void
1495sfmmu_kpm_exception(void)
1496{
1497}
1498
1499#else /* lint */
1500
1501#ifdef	PTL1_PANIC_DEBUG
1502	.seg	".data"
1503	.global	test_ptl1_panic
1504test_ptl1_panic:
1505	.word	0
1506	.align	8
1507
1508	.seg	".text"
1509	.align	4
1510#endif	/* PTL1_PANIC_DEBUG */
1511
1512
1513	ENTRY_NP(sfmmu_pagefault)
1514	USE_ALTERNATE_GLOBALS(%g5)
1515	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
1516	rdpr	%tt, %g6
1517	cmp	%g6, FAST_IMMU_MISS_TT
1518	be,a,pn	%icc, 1f
1519	  mov	T_INSTR_MMU_MISS, %g3
1520	cmp	%g6, T_INSTR_MMU_MISS
1521	be,a,pn	%icc, 1f
1522	  mov	T_INSTR_MMU_MISS, %g3
1523	mov	%g5, %g2
1524	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
1525	cmp	%g6, FAST_DMMU_MISS_TT
1526	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1527	cmp	%g6, T_DATA_MMU_MISS
1528	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1529
1530#ifdef  PTL1_PANIC_DEBUG
1531	/* check if we want to test the tl1 panic */
1532	sethi	%hi(test_ptl1_panic), %g4
1533	ld	[%g4 + %lo(test_ptl1_panic)], %g1
1534	st	%g0, [%g4 + %lo(test_ptl1_panic)]
1535	cmp	%g1, %g0
1536	bne,a,pn %icc, ptl1_panic
1537	  or	%g0, PTL1_BAD_DEBUG, %g1
1538#endif	/* PTL1_PANIC_DEBUG */
15391:
1540	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
1541	/*
1542	 * g2 = tag access reg
1543	 * g3.l = type
1544	 * g3.h = 0
1545	 */
1546	sethi	%hi(trap), %g1
1547	or	%g1, %lo(trap), %g1
15482:
1549	ba,pt	%xcc, sys_trap
1550	  mov	-1, %g4
1551	SET_SIZE(sfmmu_pagefault)
1552
1553	ENTRY_NP(sfmmu_mmu_trap)
1554	USE_ALTERNATE_GLOBALS(%g5)
1555	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
1556	rdpr	%tt, %g6
1557	cmp	%g6, FAST_IMMU_MISS_TT
1558	be,a,pn	%icc, 1f
1559	  mov	T_INSTR_MMU_MISS, %g3
1560	cmp	%g6, T_INSTR_MMU_MISS
1561	be,a,pn	%icc, 1f
1562	  mov	T_INSTR_MMU_MISS, %g3
1563	mov	%g5, %g2
1564	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
1565	cmp	%g6, FAST_DMMU_MISS_TT
1566	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1567	cmp	%g6, T_DATA_MMU_MISS
1568	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
15691:
1570	/*
1571	 * g2 = tag access reg
1572	 * g3 = type
1573	 */
1574	sethi	%hi(sfmmu_tsbmiss_exception), %g1
1575	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
1576	ba,pt	%xcc, sys_trap
1577	  mov	-1, %g4
1578	/*NOTREACHED*/
1579	SET_SIZE(sfmmu_mmu_trap)
1580
1581	ENTRY_NP(sfmmu_suspend_tl)
1582	USE_ALTERNATE_GLOBALS(%g5)
1583	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
1584	rdpr	%tt, %g6
1585	cmp	%g6, FAST_IMMU_MISS_TT
1586	be,a,pn	%icc, 1f
1587	  mov	T_INSTR_MMU_MISS, %g3
1588	mov	%g5, %g2
1589	cmp	%g6, FAST_DMMU_MISS_TT
1590	move	%icc, T_DATA_MMU_MISS, %g3
1591	movne	%icc, T_DATA_PROT, %g3
15921:
1593	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
1594	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
1595	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
1596	ba,pt	%xcc, sys_trap
1597	  mov	PIL_15, %g4
1598	/*NOTREACHED*/
1599	SET_SIZE(sfmmu_suspend_tl)
1600
1601	/*
1602	 * No %g registers in use at this point.
1603	 */
1604	ENTRY_NP(sfmmu_window_trap)
1605	rdpr	%tpc, %g1
1606#ifdef sun4v
1607#ifdef DEBUG
1608	/* We assume previous %gl was 1 */
1609	rdpr	%tstate, %g4
1610	srlx	%g4, TSTATE_GL_SHIFT, %g4
1611	and	%g4, TSTATE_GL_MASK, %g4
1612	cmp	%g4, 1
1613	bne,a,pn %icc, ptl1_panic
1614	  mov	PTL1_BAD_WTRAP, %g1
1615#endif /* DEBUG */
1616	/* user miss at tl>1. better be the window handler or user_rtt */
1617	/* in user_rtt? */
1618	set	rtt_fill_start, %g4
1619	cmp	%g1, %g4
1620	blu,pn %xcc, 6f
1621	 .empty
1622	set	rtt_fill_end, %g4
1623	cmp	%g1, %g4
1624	bgeu,pn %xcc, 6f
1625	 nop
1626	set	fault_rtt_fn1, %g1
1627	wrpr	%g0, %g1, %tnpc
1628	ba,a	7f
16296:
1630	! must save this trap level before descending trap stack
1631	! no need to save %tnpc, either overwritten or discarded
1632	! already got it: rdpr	%tpc, %g1
1633	rdpr	%tstate, %g6
1634	rdpr	%tt, %g7
1635	! trap level saved, go get underlying trap type
1636	rdpr	%tl, %g5
1637	sub	%g5, 1, %g3
1638	wrpr	%g3, %tl
1639	rdpr	%tt, %g2
1640	wrpr	%g5, %tl
1641	! restore saved trap level
1642	wrpr	%g1, %tpc
1643	wrpr	%g6, %tstate
1644	wrpr	%g7, %tt
1645#else /* sun4v */
1646	/* user miss at tl>1. better be the window handler */
1647	rdpr	%tl, %g5
1648	sub	%g5, 1, %g3
1649	wrpr	%g3, %tl
1650	rdpr	%tt, %g2
1651	wrpr	%g5, %tl
1652#endif /* sun4v */
1653	and	%g2, WTRAP_TTMASK, %g4
1654	cmp	%g4, WTRAP_TYPE
1655	bne,pn	%xcc, 1f
1656	 nop
1657	/* tpc should be in the trap table */
1658	set	trap_table, %g4
1659	cmp	%g1, %g4
1660	blt,pn %xcc, 1f
1661	 .empty
1662	set	etrap_table, %g4
1663	cmp	%g1, %g4
1664	bge,pn %xcc, 1f
1665	 .empty
1666	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
1667	add	%g1, WTRAP_FAULTOFF, %g1
1668	wrpr	%g0, %g1, %tnpc
16697:
1670	/*
1671	 * some wbuf handlers will call systrap to resolve the fault
1672	 * we pass the trap type so they figure out the correct parameters.
1673	 * g5 = trap type, g6 = tag access reg
1674	 */
1675
1676	/*
1677	 * only use g5, g6, g7 registers after we have switched to alternate
1678	 * globals.
1679	 */
1680	SET_GL_REG(1)
1681	USE_ALTERNATE_GLOBALS(%g5)
1682	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
1683	rdpr	%tt, %g7
1684	cmp	%g7, FAST_IMMU_MISS_TT
1685	be,a,pn	%icc, ptl1_panic
1686	  mov	PTL1_BAD_WTRAP, %g1
1687	cmp	%g7, T_INSTR_MMU_MISS
1688	be,a,pn	%icc, ptl1_panic
1689	  mov	PTL1_BAD_WTRAP, %g1
1690	mov	T_DATA_PROT, %g5
1691	cmp	%g7, FAST_DMMU_MISS_TT
1692	move	%icc, T_DATA_MMU_MISS, %g5
1693	cmp	%g7, T_DATA_MMU_MISS
1694	move	%icc, T_DATA_MMU_MISS, %g5
1695	! XXXQ AGS re-check out this one
1696	done
16971:
1698	CPU_ADDR(%g1, %g4)
1699	ld	[%g1 + CPU_TL1_HDLR], %g4
1700	brnz,a,pt %g4, sfmmu_mmu_trap
1701	  st	%g0, [%g1 + CPU_TL1_HDLR]
1702	ba,pt	%icc, ptl1_panic
1703	  mov	PTL1_BAD_TRAP, %g1
1704	SET_SIZE(sfmmu_window_trap)
1705
1706	ENTRY_NP(sfmmu_kpm_exception)
1707	/*
1708	 * We have accessed an unmapped segkpm address or a legal segkpm
1709	 * address which is involved in a VAC alias conflict prevention.
1710	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
1711	 * set. If it is, we will instead note that a fault has occurred
1712	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
1713	 * a "retry"). This will step over the faulting instruction.
1714	 * Note that this means that a legal segkpm address involved in
1715	 * a VAC alias conflict prevention (a rare case to begin with)
1716	 * cannot be used in DTrace.
1717	 */
1718	CPU_INDEX(%g1, %g2)
1719	set	cpu_core, %g2
1720	sllx	%g1, CPU_CORE_SHIFT, %g1
1721	add	%g1, %g2, %g1
1722	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
1723	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
1724	bz	0f
1725	or	%g2, CPU_DTRACE_BADADDR, %g2
1726	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
1727	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
1728	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
1729	done
17300:
1731	TSTAT_CHECK_TL1(1f, %g1, %g2)
17321:
1733	USE_ALTERNATE_GLOBALS(%g5)
1734	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
1735	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1736	/*
1737	 * g2=tagacc g3.l=type g3.h=0
1738	 */
1739	sethi	%hi(trap), %g1
1740	or	%g1, %lo(trap), %g1
1741	ba,pt	%xcc, sys_trap
1742	mov	-1, %g4
1743	SET_SIZE(sfmmu_kpm_exception)
1744
1745#endif /* lint */
1746
1747#if defined (lint)
1748
1749void
1750sfmmu_tsb_miss(void)
1751{
1752}
1753
1754void
1755sfmmu_kpm_dtsb_miss(void)
1756{
1757}
1758
1759void
1760sfmmu_kpm_dtsb_miss_small(void)
1761{
1762}
1763
1764#else /* lint */
1765
1766
1767#if (CTX_SIZE != (1 << CTX_SZ_SHIFT))
1768#error - size of context struct does not match with CTX_SZ_SHIFT
1769#endif
1770
1771#if (IMAP_SEG != 0)
1772#error - ism_map->ism_seg offset is not zero
1773#endif
1774
1775/*
1776 * Copies the ism mapping for this ctx into param "ism" if this is an ISM
1777 * tlb miss and branches to label "ismhit".  If this is not an ISM
1778 * process or not an ISM tlb miss, it falls through.
1779 *
1780 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
1781 * this process.
1782 * If so, it will branch to label "ismhit".  If not, it will fall through.
1783 *
1784 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
1785 * so that any other threads of this process will not try and walk the ism
1786 * maps while they are being changed.
1787 *
1788 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
1789 *       will make sure of that. This means we can terminate our search on
1790 *       the first zero mapping we find.
1791 *
1792 * Parameters:
1793 * tagacc	= tag access register (vaddr + ctx) (in)
1794 * tsbmiss	= address of tsb miss area (in)
1795 * ismseg	= contents of ism_seg for this ism map (out)
1796 * ismhat	= physical address of imap_ismhat for this ism map (out)
1797 * tmp1		= scratch reg (CLOBBERED)
1798 * tmp2		= scratch reg (CLOBBERED)
1799 * tmp3		= scratch reg (CLOBBERED)
1800 * label:    temporary labels
1801 * ismhit:   label where to jump to if an ism dtlb miss
1802 * exitlabel: label where to jump if hat is busy due to hat_unshare.
1803 */
1804#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
1805	label, ismhit)							\
1806	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
1807	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
1808	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
1809label/**/1:								;\
1810	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
1811	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
1812label/**/2:								;\
1813	brz,pt  ismseg, label/**/3		/* no mapping */	;\
1814	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
1815	lduha	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
1816	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
1817	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
1818	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
1819	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
1820	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
1821	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
1822	cmp	tmp2, tmp1		 	/* check va <= offset*/	;\
1823	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
1824	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
1825									;\
1826	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
1827	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
1828	cmp	ismhat, tmp1						;\
1829	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
1830	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
1831									;\
1832	add	tmp3, IBLK_NEXTPA, tmp1					;\
1833	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
1834	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
1835	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
1836label/**/3:
1837
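/*
 * Stripped of the physical-address loads, ISM_CHECK is a walk over the
 * per-process ism blocks looking for a segment that covers the faulting
 * address.  A C sketch; the sk_ structures and the slot count are
 * illustrative stand-ins for the real ism_blk/ism_map layout:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	#define	SK_ISM_MAP_SLOTS	4	// assumed slots per ism block
 *
 *	struct sk_ism_map {
 *		uint64_t	seg;		// 0 terminates the search
 *		unsigned	vb_shift;	// shift that yields the seg base
 *		uint32_t	sz_mask;	// mask that yields the seg size
 *		void		*ism_hat;	// what an ISM hit returns
 *	};
 *
 *	struct sk_ism_blk {
 *		struct sk_ism_map	maps[SK_ISM_MAP_SLOTS];
 *		struct sk_ism_blk	*next;
 *	};
 *
 *	static void *
 *	sk_ism_check(struct sk_ism_blk *blk, uint64_t vaddr)
 *	{
 *		for (; blk != NULL; blk = blk->next) {
 *			for (int i = 0; i < SK_ISM_MAP_SLOTS; i++) {
 *				struct sk_ism_map *m = &blk->maps[i];
 *				if (m->seg == 0)
 *					return (NULL);	// no holes: done
 *				uint64_t vbase = m->seg >> m->vb_shift;
 *				uint64_t off = (vaddr >> m->vb_shift) - vbase;
 *				if (off < (m->seg & m->sz_mask))
 *					return (m->ism_hat);	// ISM hit
 *			}
 *		}
 *		return (NULL);
 *	}
 */
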
1838/*
1839 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
1840 * It also returns the virtual pg for vaddr (i.e. vaddr >> hmeshift).
1841 * Parameters:
1842 * vaddr = reg containing virtual address
1843 * hatid = reg containing sfmmu pointer
1844 * hmeshift = constant/register to shift vaddr to obtain vapg
1845 * hmebp = register where bucket pointer will be stored
1846 * vapg = register where virtual page will be stored
1847 * tmp1, tmp2 = tmp registers
1848 */
1849
1850
1851#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
1852	vapg, label, tmp1, tmp2)					\
1853	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
1854	brnz,a,pt tmp1, label/**/1					;\
1855	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
1856	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
1857	ba,pt	%xcc, label/**/2					;\
1858	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
1859label/**/1:								;\
1860	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
1861label/**/2:								;\
1862	srlx	tagacc, hmeshift, vapg					;\
1863	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
1864	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
1865	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
1866	add	hmebp, tmp1, hmebp
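/*
 * Illustrative sketch, not part of the build, of the hash function above.
 * It assumes the TSBMISS_*HASHSZ fields hold the hash mask (as the "and"
 * implies) and that ctx == 0 selects the kernel hash:
 *
 *	mask  = (ctx == 0) ? tsbmiss->khashsz : tsbmiss->uhashsz;
 *	base  = (ctx == 0) ? tsbmiss->khashstart : tsbmiss->uhashstart;
 *	vapg  = tagacc >> hmeshift;
 *	hmebp = base + ((vapg ^ (uint64_t)hatid) & mask) * HMEBUCK_SIZE;
 */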
1867
1868/*
1869 * hashtag includes bspage + hashno (64 bits).
1870 */
1871
1872#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
1873	sllx	vapg, hmeshift, vapg					;\
1874	or	vapg, hashno, hblktag
1875
1876/*
1877 * Function to traverse the hmeblk hash linked list and find the matching
1878 * hmeblk.  The search is done using physical pointers.  It returns the
1879 * physical and virtual address pointers to the hmeblk that matches the tag
1880 * provided.
1881 * Parameters:
1882 * hmebp	= register that points to hme hash bucket, also used as
1883 *		  tmp reg (clobbered)
1884 * hmeblktag	= register with hmeblk tag match
1885 * hatid	= register with hatid
1886 * hmeblkpa	= register where physical ptr will be stored
1887 * hmeblkva	= register where virtual ptr will be stored
1888 * tmp1		= tmp reg
1889 * label: temporary label
1890 */
1891
1892#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, hmeblkva,	\
1893	tsbarea, tmp1, label)					 	\
1894	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
1895	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
1896	add     hmebp, HMEBUCK_HBLK, hmeblkva				;\
1897	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
1898	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
1899label/**/1:								;\
1900	brz,pn	hmeblkva, label/**/2					;\
1901	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
1902	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
1903	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
1904	add	hmebp, CLONGSIZE, hmebp					;\
1905	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
1906	xor	tmp1, hmeblktag, tmp1					;\
1907	xor	hmebp, hatid, hmebp					;\
1908	or	hmebp, tmp1, hmebp					;\
1909	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
1910	  add	hmeblkpa, HMEBLK_NEXT, hmebp				;\
1911	ldna	[hmebp]ASI_MEM, hmeblkva	/* hmeblk ptr va */	;\
1912	add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
1913	ba,pt	%xcc, label/**/1					;\
1914	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
1915label/**/2:
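/*
 * Illustrative sketch, not part of the build; field names are approximate
 * and all loads on the physical side go through ASI_MEM:
 *
 *	hmeblkpa = hmebp->hmeh_nextpa;		(HMEBUCK_NEXTPA)
 *	hmeblkva = hmebp->hmeblkp;		(HMEBUCK_HBLK)
 *	while (hmeblkva != NULL) {
 *		tag0 = word 0 of hmeblkpa->hblk_tag;	(bspage | rehash)
 *		tag1 = word 1 of hmeblkpa->hblk_tag;	(hatid)
 *		if (tag0 == hmeblktag && tag1 == hatid)
 *			break;		(hmeblkpa/hmeblkva point at the match)
 *		hmeblkva = hmeblkpa->hblk_next;
 *		hmeblkpa = hmeblkpa->hblk_nextpa;
 *	}
 */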
1916
1917
1918#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
1919#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
1920#endif
1921
1922/*
1923 * HMEBLK_TO_HMENT is a macro that given an hmeblk and a vaddr returns
1924 * the offset for the corresponding hment.
1925 * Parameters:
1926 * vaddr = register with virtual address
1927 * hmeblkpa = physical pointer to hme_blk
1928 * hmentoff = register where hment offset will be stored
1929 * tmp1 = tmp register
1930 * label1 = temporary label
1931 */
1932#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, tmp1, label1)	\
1933	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
1934	lda	[hmentoff]ASI_MEM, tmp1 				;\
1935	andcc	tmp1, HBLK_SZMASK, %g0	 /* tmp1 = get_hblk_sz(%g5) */	;\
1936	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
1937	  or	%g0, HMEBLK_HME1, hmentoff				;\
1938	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
1939	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
1940	sllx	tmp1, SFHME_SHIFT, tmp1					;\
1941	add	tmp1, HMEBLK_HME1, hmentoff				;\
1942label1:
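/*
 * Illustrative sketch, not part of the build:
 *
 *	if (get_hblk_sz(hmeblkp) != TTE8K)
 *		hmentoff = HMEBLK_HME1;		(single hment for large pages)
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    (((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) << SFHME_SHIFT);
 */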
1943
1944/*
1945 * GET_TTE is a macro that returns a TTE given a tag and hatid.
1946 *
1947 * tagacc	= tag access register (vaddr + ctx) (in)
1948 * hatid	= sfmmu pointer for TSB miss (in)
1949 * tte		= tte for TLB miss if found, otherwise clobbered (out)
1950 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
1951 * hmeblkva	= VA of hment if found, otherwise clobbered (out)
1952 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
1953 * hmentoff	= temporarily stores hment offset (clobbered)
1954 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
1955 *		  for this page size.
1956 * hashno	= constant/register hash number
1957 * label	= temporary label for branching within macro.
1958 * foundlabel	= label to jump to when tte is found.
1959 * suspendlabel = label to jump to when tte is suspended.
1960 * exitlabel	= label to jump to when tte is not found.  The hmebp lock
1961 *		  has already been dropped by this time.
1962 *
1963 * The caller should set up the tsbmiss->scratch[2] field correctly before
1964 * calling this function (aka TSBMISS_SCRATCH + TSBMISS_HATID).
1965 */
1966#define GET_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea, hmentoff, \
1967		hmeshift, hashno, label, foundlabel, suspendlabel, exitlabel) \
1968									;\
1969	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
1970	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
1971	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
1972		hmeblkpa, label/**/5, hmentoff, hmeblkva)		;\
1973									;\
1974	/*								;\
1975	 * tagacc = tagacc						;\
1976	 * hatid = hatid						;\
1977	 * tsbarea = tsbarea						;\
1978	 * tte   = hmebp (hme bucket pointer)				;\
1979	 * hmeblkpa  = vapg  (virtual page)				;\
1980	 * hmentoff, hmeblkva = scratch					;\
1981	 */								;\
1982	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmentoff)	;\
1983									;\
1984	/*								;\
1985	 * tagacc = tagacc						;\
1986	 * hatid = hatid						;\
1987	 * tte   = hmebp						;\
1988	 * hmeblkpa  = CLOBBERED					;\
1989	 * hmentoff  = htag_bspage & hashno				;\
1990	 * hmeblkva  = scratch						;\
1991	 */								;\
1992	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
1993	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
1994	HMEHASH_SEARCH(tte, hmentoff, hatid, hmeblkpa, hmeblkva, 	\
1995		tsbarea, tagacc, label/**/1)				;\
1996	/*								;\
1997	 * tagacc = CLOBBERED						;\
1998	 * tte = CLOBBERED						;\
1999	 * hmeblkpa = hmeblkpa						;\
2000	 * hmeblkva = hmeblkva						;\
2001	 */								;\
2002	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2003	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2004	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2005	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2006	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2007	  nop								;\
2008label/**/4:								;\
2009	/*								;\
2010	 * We have found the hmeblk containing the hment.		;\
2011	 * Now we calculate the corresponding tte.			;\
2012	 *								;\
2013	 * tagacc = tagacc						;\
2014	 * hatid = clobbered						;\
2015	 * tte   = hmebp						;\
2016	 * hmeblkpa  = hmeblkpa						;\
2017	 * hmentoff  = hblktag						;\
2018	 * hmeblkva  = hmeblkva 					;\
2019	 */								;\
2020	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hmentoff, hatid, label/**/2)	;\
2021									;\
2022	add	hmentoff, SFHME_TTE, hmentoff				;\
2023	add	hmeblkpa, hmentoff, hmeblkpa				;\
2024	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2025	add	hmeblkva, hmentoff, hmeblkva				;\
2026	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2027	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmentoff ;\
2028	HMELOCK_EXIT(hmentoff, hmentoff, ASI_MEM)	/* drop lock */	;\
2029	set	TTE_SUSPEND, hmentoff					;\
2030	TTE_SUSPEND_INT_SHIFT(hmentoff)					;\
2031	btst	tte, hmentoff						;\
2032	bz,pt	%xcc, foundlabel					;\
2033	 nop								;\
2034									;\
2035	/*								;\
2036	 * Mapping is suspended, so goto suspend label.			;\
2037	 */								;\
2038	ba,pt	%xcc, suspendlabel					;\
2039	  nop
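/*
 * Illustrative sketch, not part of the build, of the overall GET_TTE flow
 * (HMELOCK_ENTER/EXIT are the hash bucket lock primitives used above):
 *
 *	save tagacc and hatid in the tsbmiss scratch area;
 *	hmebp = HMEHASH_FUNC_ASM(tagacc, hatid, hmeshift);
 *	tag   = ((tagacc >> hmeshift) << hmeshift) | hashno;
 *	HMELOCK_ENTER(hmebp);
 *	(hmeblkpa, hmeblkva) = HMEHASH_SEARCH(hmebp, tag, hatid);
 *	if (hmeblkva == NULL) { HMELOCK_EXIT(hmebp); goto exitlabel; }
 *	tte = hme_tte of the hment selected by HMEBLK_TO_HMENT, read via pa;
 *	HMELOCK_EXIT(hmebp);
 *	if (tte & TTE_SUSPEND) goto suspendlabel; else goto foundlabel;
 */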
2040
2041	/*
2042	 * KERNEL PROTECTION HANDLER
2043	 *
2044	 * g1 = tsb8k pointer register (clobbered)
2045	 * g2 = tag access register (ro)
2046	 * g3 - g7 = scratch registers
2047	 *
2048	 * Note: This function is patched at runtime for performance reasons.
2049	 * 	 Any changes here require sfmmu_patch_ktsb to be updated as well.
2050	 */
2051	ENTRY_NP(sfmmu_kprot_trap)
2052	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2053sfmmu_kprot_patch_ktsb_base:
2054	RUNTIME_PATCH_SETX(%g1, %g6)
2055	/* %g1 = contents of ktsb_base or ktsb_pbase */
2056sfmmu_kprot_patch_ktsb_szcode:
2057	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2058
2059	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2060	! %g1 = First TSB entry pointer, as TSB miss handler expects
2061
2062	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2063sfmmu_kprot_patch_ktsb4m_base:
2064	RUNTIME_PATCH_SETX(%g3, %g6)
2065	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2066sfmmu_kprot_patch_ktsb4m_szcode:
2067	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2068
2069	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2070	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2071
2072	CPU_TSBMISS_AREA(%g6, %g7)
2073	HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2074	ba,pt	%xcc, sfmmu_tsb_miss_tt
2075	  nop
2076
2077	/*
2078	 * USER PROTECTION HANDLER
2079	 *
2080	 * g1 = tsb8k pointer register (ro)
2081	 * g2 = tag access register (ro)
2082	 * g3 = faulting context (clobbered, currently not used)
2083	 * g4 - g7 = scratch registers
2084	 */
2085	ALTENTRY(sfmmu_uprot_trap)
2086#ifdef sun4v
2087	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2088	/* %g1 = first TSB entry ptr now, %g2 preserved */
2089
2090	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2091	brlz,pt %g3, 9f			/* check for 2nd TSB */
2092	  mov	%g0, %g3		/* clear second tsbe ptr */
2093
2094	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2095	/* %g3 = second TSB entry ptr now, %g2 preserved */
2096
2097#else /* sun4v */
2098
2099	brgez,pt %g1, 9f		/* check for 2nd TSB */
2100	  mov	%g0, %g3		/* clear second tsbe ptr */
2101
2102	mov	%g2, %g7
2103	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2104	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2105	mov	%g1, %g7
2106	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2107
2108#endif /* sun4v */
21099:
2110	CPU_TSBMISS_AREA(%g6, %g7)
2111	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2112	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2113	  nop
2114
2115	/*
2116	 * Kernel 8K page iTLB miss.  We also get here if we took a
2117	 * fast instruction access mmu miss trap while running in
2118	 * invalid context.
2119	 *
2120	 * %g1 = 8K TSB pointer register (not used, clobbered)
2121	 * %g2 = tag access register (used)
2122	 * %g3 = faulting context id (used)
2123	 * %g7 = 4M virtual page number for tag matching  (used)
2124	 */
2125	.align	64
2126	ALTENTRY(sfmmu_kitlb_miss)
2127	brnz,pn %g3, tsb_tl0_noctxt
2128	  nop
2129
2130	/* kernel miss */
2131	/* get kernel tsb pointer */
2132	/* we patch the next set of instructions at run time */
2133	/* NOTE: any changes here require sfmmu_patch_ktsb to be updated too */
2134iktsbbase:
2135	RUNTIME_PATCH_SETX(%g4, %g5)
2136	/* %g4 = contents of ktsb_base or ktsb_pbase */
2137
2138iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2139	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2140	or	%g4, %g1, %g1			! form tsb ptr
2141	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2142	cmp	%g4, %g7
2143	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2144	  andcc %g5, TTE_EXECPRM_INT, %g0	! check exec bit
2145	bz,pn	%icc, exec_fault
2146	  nop
2147	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2148	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2149	retry
2150
2151	/*
2152	 * Kernel dTLB miss.  We also get here if we took a fast data
2153	 * access mmu miss trap while running in invalid context.
2154	 *
2155	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2156	 *	We select the TSB miss handler to branch to depending on
2157	 *	the virtual address of the access.  In the future it may
2158	 *	be desirable to separate kpm TTEs into their own TSB,
2159	 *	in which case all that needs to be done is to set
2160	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2161	 *	early in the miss if we detect a kpm VA to a new handler.
2162	 *
2163	 * %g1 = 8K TSB pointer register (not used, clobbered)
2164	 * %g2 = tag access register (used)
2165	 * %g3 = faulting context id (used)
2166	 */
2167	.align	64
2168	ALTENTRY(sfmmu_kdtlb_miss)
2169	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2170	  nop
2171
2172	/* Gather some stats for kpm misses in the TLB. */
2173	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2174	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2175
2176	/*
2177	 * Get first TSB offset and look for 8K/64K/512K mapping
2178	 * using the 8K virtual page as the index.
2179	 *
2180	 * We patch the next set of instructions at run time;
2181	 * any changes here require sfmmu_patch_ktsb changes too.
2182	 */
2183dktsbbase:
2184	RUNTIME_PATCH_SETX(%g7, %g6)
2185	/* %g7 = contents of ktsb_base or ktsb_pbase */
2186
2187dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2188	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2189
2190	/*
2191	 * At this point %g1 is our index into the TSB.
2192	 * We just masked off enough bits of the VA depending
2193	 * on our TSB size code.
2194	 */
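	/*
	 * Illustrative sketch, not part of the source (szc is the
	 * runtime-patched size code; TAGACC_SHIFT is assumed to be the
	 * 8K page shift):
	 *
	 *	index = ((tagacc >> TAGACC_SHIFT) &
	 *	    ((1 << (TSB_START_SIZE + szc)) - 1)) << TSB_ENTRY_SHIFT;
	 *	tsbep = ktsb_base (or ktsb_pbase) + index;
	 */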
2195	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2196	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2197	cmp	%g6, %g4			! compare tag
2198	bne,pn	%xcc, dktsb4m_kpmcheck_small
2199	  add	%g7, %g1, %g1			/* form tsb ptr */
2200	TT_TRACE(trace_tsbhit)
2201	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2202	/* trapstat expects tte in %g5 */
2203	retry
2204
2205	/*
2206	 * If kpm is using large pages, the following instruction needs
2207	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2208	 * so that we will probe the 4M TSB regardless of the VA.  In
2209	 * the case kpm is using small pages, we know no large kernel
2210	 * mappings are located above 0x80000000.00000000 so we skip the
2211	 * probe as an optimization.
2212	 */
2213dktsb4m_kpmcheck_small:
2214	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2215	  /* delay slot safe, below */
2216
2217	/*
2218	 * Get second TSB offset and look for 4M mapping
2219	 * using 4M virtual page as the TSB index.
2220	 *
2221	 * Here:
2222	 * %g1 = 8K TSB pointer.  Don't squash it.
2223	 * %g2 = tag access register (we still need it)
2224	 */
2225	srlx	%g2, MMU_PAGESHIFT4M, %g3
2226
2227	/*
2228	 * We patch the next set of instructions at run time;
2229	 * any changes here require sfmmu_patch_ktsb changes too.
2230	 */
2231dktsb4mbase:
2232	RUNTIME_PATCH_SETX(%g7, %g6)
2233	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2234dktsb4m:
2235	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2236	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2237
2238	/*
2239	 * At this point %g3 is our index into the TSB.
2240	 * We just masked off enough bits of the VA depending
2241	 * on our TSB size code.
2242	 */
2243	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2244	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2245	cmp	%g6, %g4			! compare tag
2246
2247dktsb4m_tsbmiss:
2248	bne,pn	%xcc, dktsb4m_kpmcheck
2249	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2250	TT_TRACE(trace_tsbhit)
2251	/* we don't check TTE size here since we assume 4M TSB is separate */
2252	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2253	/* trapstat expects tte in %g5 */
2254	retry
2255
2256	/*
2257	 * So, we failed to find a valid TTE to match the faulting
2258	 * address in either TSB.  There are a few cases that could land
2259	 * us here:
2260	 *
2261	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2262	 *    to sfmmu_tsb_miss_tt to handle the miss.
2263	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2264	 *    4M TSB.  Let segkpm handle it.
2265	 *
2266	 * Note that we shouldn't land here in the case of a kpm VA when
2267	 * kpm_smallpages is active -- we handled that case earlier at
2268	 * dktsb4m_kpmcheck_small.
2269	 *
2270	 * At this point:
2271	 *  g1 = 8K-indexed primary TSB pointer
2272	 *  g2 = tag access register
2273	 *  g3 = 4M-indexed secondary TSB pointer
2274	 */
2275dktsb4m_kpmcheck:
2276	cmp	%g2, %g0
2277	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2278	  nop
2279	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2280	  nop
2281
2282#ifdef sun4v
2283	/*
2284	 * User instruction miss w/ single TSB.
2285	 * The first probe covers 8K, 64K, and 512K page sizes,
2286	 * because 64K and 512K mappings are replicated off 8K
2287	 * pointer.
2288	 *
2289	 * g1 = tsb8k pointer register
2290	 * g2 = tag access register
2291	 * g3 - g6 = scratch registers
2292	 * g7 = TSB tag to match
2293	 */
2294	.align	64
2295	ALTENTRY(sfmmu_uitlb_fastpath)
2296
2297	SETUP_UTSB_ATOMIC_ASI(%g4, %g5)
2298	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2299	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2300	ba,pn	%xcc, sfmmu_tsb_miss_tt
2301	  mov	%g0, %g3
2302
2303	/*
2304	 * User data miss w/ single TSB.
2305	 * The first probe covers 8K, 64K, and 512K page sizes,
2306	 * because 64K and 512K mappings are replicated off 8K
2307	 * pointer.
2308	 *
2309	 * g1 = tsb8k pointer register
2310	 * g2 = tag access register
2311	 * g3 - g6 = scratch registers
2312	 * g7 = TSB tag to match
2313	 */
2314	.align 64
2315	ALTENTRY(sfmmu_udtlb_fastpath)
2316
2317	SETUP_UTSB_ATOMIC_ASI(%g4, %g6)
2318	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2319	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2320	ba,pn	%xcc, sfmmu_tsb_miss_tt
2321	  mov	%g0, %g3
2322
2323#endif /* sun4v */
2324
2325	/*
2326	 * User instruction miss w/ multiple TSBs.
2327	 * The first probe covers 8K, 64K, and 512K page sizes,
2328	 * because 64K and 512K mappings are replicated off 8K
2329	 * pointer.  Second probe covers 4M page size only.
2330	 *
2331	 * Just like sfmmu_udtlb_slowpath, except:
2332	 *   o Uses ASI_ITLB_IN
2333	 *   o checks for execute permission
2334	 *   o No ISM prediction.
2335	 *
2336	 * g1 = tsb8k pointer register
2337	 * g2 = tag access register
2338	 * g3 - g6 = scratch registers
2339	 * g7 = TSB tag to match
2340	 */
2341	.align	64
2342	ALTENTRY(sfmmu_uitlb_slowpath)
2343
2344#ifdef sun4v
2345	SETUP_UTSB_ATOMIC_ASI(%g4, %g5)
2346	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2347
2348	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2349	/* g4 - g5 = clobbered here */
2350
2351	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2352	/* g1 = first TSB pointer, g3 = second TSB pointer */
2353	srlx	%g2, TAG_VALO_SHIFT, %g7
2354	PROBE_2ND_ITSB(%g3, %g7)
2355	/* NOT REACHED */
2356#else /* sun4v */
2357	mov	%g1, %g3	/* save tsb8k reg in %g3 */
2358	SETUP_UTSB_ATOMIC_ASI(%g4, %g5)
2359	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
2360
2361	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2362	/* g4 - g5 = clobbered here */
2363
2364	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
2365	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
2366	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
2367	/* g1 = first TSB pointer, g3 = second TSB pointer */
2368	srlx	%g2, TAG_VALO_SHIFT, %g7
2369	PROBE_2ND_ITSB(%g3, %g7)
2370	/* NOT REACHED */
2371#endif /* sun4v */
2372
2373	/*
2374	 * User data miss w/ multiple TSBs.
2375	 * The first probe covers 8K, 64K, and 512K page sizes,
2376	 * because 64K and 512K mappings are replicated off 8K
2377	 * pointer.  Second probe covers 4M page size only.
2378	 *
2379	 * We consider probing for 4M pages first if the VA falls
2380	 * in a range that's likely to be ISM.
2381	 *
2382	 * g1 = tsb8k pointer register
2383	 * g2 = tag access register
2384	 * g3 - g6 = scratch registers
2385	 * g7 = TSB tag to match
2386	 */
2387	.align 64
2388	ALTENTRY(sfmmu_udtlb_slowpath)
2389
2390	SETUP_UTSB_ATOMIC_ASI(%g4, %g6)
2391
2392	/*
2393	 * Check for ISM.  If it exists, look for 4M mappings in the second TSB
2394	 * first, then probe for other mappings in the first TSB if that fails.
2395	 */
2396	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
2397	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
2398	  mov	%g1, %g3
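	/*
	 * Illustrative sketch, not part of the source: the prediction above is
	 *
	 *	predict_ism = (((int64_t)tagacc >> PREDISM_BASESHIFT) > 0);
	 *
	 * i.e. user VAs at or above (1 << PREDISM_BASESHIFT) are assumed to
	 * be ISM and the 4M (second) TSB is probed first.
	 */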
2399
2400udtlb_miss_probefirst:
2401	/*
2402	 * g1 = 8K TSB pointer register
2403	 * g2 = tag access register
2404	 * g3 = (potentially) second TSB entry ptr
2405	 * g6 = ism pred.
2406	 * g7 = vpg_4m
2407	 */
2408#ifdef sun4v
2409	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2410	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2411
2412	/*
2413	 * Here:
2414	 *   g1 = first TSB pointer
2415	 *   g2 = tag access reg
2416	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2417	 */
2418	brgz,pn	%g6, sfmmu_tsb_miss_tt
2419	  nop
2420#else /* sun4v */
2421	mov	%g1, %g4
2422	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
2423	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2424
2425	/*
2426	 * Here:
2427	 *   g1 = first TSB pointer
2428	 *   g2 = tag access reg
2429	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2430	 */
2431	brgz,pn	%g6, sfmmu_tsb_miss_tt
2432	  nop
2433	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
2434	/* fall through in 8K->4M probe order */
2435#endif /* sun4v */
2436
2437udtlb_miss_probesecond:
2438	/*
2439	 * Look in the second TSB for the TTE
2440	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
2441	 * g2 = tag access reg
2442	 * g3 = 8K TSB pointer register
2443	 * g6 = ism pred.
2444	 * g7 = vpg_4m
2445	 */
2446#ifdef sun4v
2447	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
2448	/* tagacc (%g2) not destroyed */
2449	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2450	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
2451#else
2452	mov	%g3, %g7
2453	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
2454	/* %g2 clobbered, %g3 =second tsbe ptr */
2455	mov	MMU_TAG_ACCESS, %g2
2456	ldxa	[%g2]ASI_DMMU, %g2
2457#endif
2458
2459	srlx	%g2, TAG_VALO_SHIFT, %g7
2460	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2461	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
2462	brgz,pn	%g6, udtlb_miss_probefirst
2463	  nop
2464
2465	/* fall through to sfmmu_tsb_miss_tt */
2466
2467	ALTENTRY(sfmmu_tsb_miss_tt)
2468	TT_TRACE(trace_tsbmiss)
2469	/*
2470	 * We get here if there is a TSB miss OR a write protect trap.
2471	 *
2472	 * g1 = First TSB entry pointer
2473	 * g2 = tag access register
2474	 * g3 = 4M TSB entry pointer; NULL if no 2nd TSB
2475	 * g4 - g7 = scratch registers
2476	 */
2477
2478	ALTENTRY(sfmmu_tsb_miss)
2479
2480	/*
2481	 * If trapstat is running, we need to shift the %tpc and %tnpc to
2482	 * point to trapstat's TSB miss return code (note that trapstat
2483	 * itself will patch the correct offset to add).
2484	 */
2485	rdpr	%tl, %g7
2486	cmp	%g7, 1
2487	ble,pt	%xcc, 0f
2488	  sethi	%hi(KERNELBASE), %g6
2489	rdpr	%tpc, %g7
2490	or	%g6, %lo(KERNELBASE), %g6
2491	cmp	%g7, %g6
2492	bgeu,pt	%xcc, 0f
2493	/* delay slot safe */
2494
2495	ALTENTRY(tsbmiss_trapstat_patch_point)
2496	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
2497	wrpr	%g7, %tpc
2498	add	%g7, 4, %g7
2499	wrpr	%g7, %tnpc
25000:
2501	CPU_TSBMISS_AREA(%g6, %g7)
2502
2503	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save first tsb pointer */
2504	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save second tsb pointer */
2505
2506	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
2507	brz,a,pn %g3, 1f			/* skip ahead if kernel */
2508	  ldn	[%g6 + TSBMISS_KHATID], %g7
2509	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
2510	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
2511
2512	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
2513
2514	cmp	%g3, INVALID_CONTEXT
2515	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
2516	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
2517
2518	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
2519	/*
2520	 * The miss wasn't in an ISM segment.
2521	 *
2522	 * %g1 %g3, %g4, %g5, %g7 all clobbered
2523	 * %g2 = tag access (vaddr + ctx)
2524	 */
2525
2526	ba,pt	%icc, 2f
2527	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
2528
25291:
2530	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
2531	/*
2532	 * 8K and 64K hash.
2533	 */
25342:
2535
2536	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2537		MMU_PAGESHIFT64K, TTE64K, tsb_l8K, tsb_checktte,
2538		sfmmu_suspend_tl, tsb_512K)
2539	/* NOT REACHED */
2540
2541tsb_512K:
2542	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2543	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2544	brz,pn	%g5, 3f
2545	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2546	and	%g4, HAT_512K_FLAG, %g5
2547
2548	/*
2549	 * Note that there is a small window here where we may have
2550	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
2551	 * flag yet, so we will skip searching the 512k hash list.
2552	 * In this case we will end up in pagefault which will find
2553	 * the mapping and return.  So, in this instance we will end up
2554	 * spending a bit more time resolving this TSB miss, but it can
2555	 * only happen once per process and even then, the chances of that
2556	 * are very small, so it's not worth the extra overhead it would
2557	 * take to close this window.
2558	 */
2559	brz,pn	%g5, tsb_4M
2560	  nop
25613:
2562	/*
2563	 * 512K hash
2564	 */
2565
2566	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2567		MMU_PAGESHIFT512K, TTE512K, tsb_l512K, tsb_checktte,
2568		sfmmu_suspend_tl, tsb_4M)
2569	/* NOT REACHED */
2570
2571tsb_4M:
2572	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2573	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2574	brz,pn	%g5, 4f
2575	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2576	and	%g4, HAT_4M_FLAG, %g5
2577	brz,pn	%g5, tsb_32M
2578	  nop
25794:
2580	/*
2581	 * 4M hash
2582	 */
2583
2584	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2585		MMU_PAGESHIFT4M, TTE4M, tsb_l4M, tsb_checktte,
2586		sfmmu_suspend_tl, tsb_32M)
2587	/* NOT REACHED */
2588
2589tsb_32M:
2590#ifndef sun4v
2591	GET_CPU_IMPL(%g5)
2592	cmp	%g5, PANTHER_IMPL
2593	bne,pt	%xcc, tsb_pagefault
2594	  nop
2595#endif
2596
2597	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2598	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2599#ifdef sun4v
2600	brz,pn	%g5, 6f
2601#else
2602	brz,pn	%g5, tsb_pagefault
2603#endif
2604	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2605	and	%g4, HAT_32M_FLAG, %g5
2606	brz,pn	%g5, tsb_256M
2607	  nop
26085:
2609	/*
2610	 * 32M hash
2611	 */
2612
2613	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2614		MMU_PAGESHIFT32M, TTE32M, tsb_l32M, tsb_checktte,
2615		sfmmu_suspend_tl, tsb_256M)
2616	/* NOT REACHED */
2617
2618tsb_256M:
2619	lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2620	and	%g4, HAT_256M_FLAG, %g5
2621	brz,pn	%g5, tsb_pagefault
2622	  nop
26236:
2624	/*
2625	 * 256M hash
2626	 */
2627
2628	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2629	    MMU_PAGESHIFT256M, TTE256M, tsb_l256M, tsb_checktte,
2630	    sfmmu_suspend_tl, tsb_pagefault)
2631	/* NOT REACHED */
2632
2633tsb_checktte:
2634	/*
2635	 * g3 = tte
2636	 * g4 = tte pa
2637	 * g5 = tte va
2638	 * g6 = tsbmiss area
2639	 */
2640	brgez,pn %g3, tsb_pagefault	/* if tte invalid branch */
2641	  nop
2642
2643tsb_validtte:
2644	/*
2645	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
2646	 */
2647	rdpr	%tt, %g7
2648	cmp	%g7, FAST_PROT_TT
2649	bne,pt	%icc, 4f
2650	  nop
2651
2652	TTE_SET_REFMOD_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_refmod,
2653	    tsb_protfault)
2654
2655	rdpr	%tt, %g5
2656	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
2657	ba,pt	%xcc, tsb_update_tl1
2658	  nop
2659
26604:
2661	/*
2662	 * If ITLB miss check exec bit.
2663	 * If not set treat as invalid TTE.
2664	 */
2665	cmp     %g7, T_INSTR_MMU_MISS
2666	be,pn	%icc, 5f
2667	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
2668	cmp     %g7, FAST_IMMU_MISS_TT
2669	bne,pt %icc, 3f
2670	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
26715:
2672	bz,pn %icc, tsb_protfault
2673	  nop
2674
26753:
2676	/*
2677	 * Set reference bit if not already set
2678	 */
2679	TTE_SET_REF_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_ref)
2680
2681	/*
2682	 * Now, load into TSB/TLB.  At this point:
2683	 * g3 = tte
2684	 * g4 = patte
2685	 * g6 = tsbmiss area
2686	 */
2687	rdpr	%tt, %g5
2688#ifdef sun4v
2689	MMU_FAULT_STATUS_AREA(%g2)
2690	cmp	%g5, T_INSTR_MMU_MISS
2691	be,a,pt	%icc, 9f
2692	  nop
2693	cmp	%g5, FAST_IMMU_MISS_TT
2694	be,a,pt	%icc, 9f
2695	  nop
2696	add	%g2, MMFSA_D_, %g2
26979:
2698	ldx	[%g2 + MMFSA_CTX_], %g7
2699	sllx	%g7, TTARGET_CTX_SHIFT, %g7
2700	ldx	[%g2 + MMFSA_ADDR_], %g2
2701	srlx	%g2, TTARGET_VA_SHIFT, %g2
2702	or	%g2, %g7, %g2
2703#else
2704	cmp	%g5, FAST_IMMU_MISS_TT
2705	be,a,pt	%icc, tsb_update_tl1
2706	  ldxa	[%g0]ASI_IMMU, %g2
2707	ldxa	[%g0]ASI_DMMU, %g2
2708#endif
2709tsb_update_tl1:
2710	srlx	%g2, TTARGET_CTX_SHIFT, %g7
2711	brz,pn	%g7, tsb_kernel
2712#ifdef sun4v
2713	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
2714#else
2715	  srlx	%g3, TTE_SZ_SHFT, %g7
2716#endif
2717
2718tsb_user:
2719#ifdef sun4v
2720	cmp	%g7, TTE4M
2721	bge,pn	%icc, tsb_user4m
2722	  nop
2723#else
2724	cmp	%g7, TTESZ_VALID | TTE4M
2725	be,pn	%icc, tsb_user4m
2726	  srlx	%g3, TTE_SZ2_SHFT, %g7
2727	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
2728	bnz,a,pn %icc, tsb_user4m
2729	  nop
2730#endif
2731
2732tsb_user8k:
2733	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = first TSB ptr
2734
2735#ifndef sun4v
2736	mov	ASI_N, %g7	! user TSBs always accessed by VA
2737	mov	%g7, %asi
2738#endif /* sun4v */
2739
2740	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 5)
2741
2742#ifdef sun4v
2743	cmp	%g5, T_INSTR_MMU_MISS
2744	be,a,pn	%xcc, 9f
2745	  mov	%g3, %g5
2746#endif /* sun4v */
2747	cmp	%g5, FAST_IMMU_MISS_TT
2748	be,pn	%xcc, 9f
2749	  mov	%g3, %g5
2750
2751	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2752	! trapstat wants TTE in %g5
2753	retry
27549:
2755	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2756	! trapstat wants TTE in %g5
2757	retry
2758
2759tsb_user4m:
2760	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 = tsbp */
2761	brz,pn	%g1, 5f	/* Check to see if we have 2nd TSB programmed */
2762	  nop
2763
2764#ifndef sun4v
2765	mov	ASI_N, %g7	! user TSBs always accessed by VA
2766	mov	%g7, %asi
2767#endif
2768
2769	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 6)
2770
27715:
2772#ifdef sun4v
2773	cmp	%g5, T_INSTR_MMU_MISS
2774	be,a,pn	%xcc, 9f
2775	  mov	%g3, %g5
2776#endif /* sun4v */
2777	cmp	%g5, FAST_IMMU_MISS_TT
2778	be,pn	%xcc, 9f
2779	  mov	%g3, %g5
2780
2781	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2782	! trapstat wants TTE in %g5
2783	retry
27849:
2785	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2786	! trapstat wants TTE in %g5
2787	retry
2788
2789tsb_kernel:					! no 32M or 256M support
2790#ifdef sun4v
2791	cmp	%g7, TTE4M
2792#else
2793	cmp	%g7, TTESZ_VALID | TTE4M
2794#endif
2795	be,pn	%icc, 5f
2796	  nop
2797	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8k tsbptr
2798	ba,pt	%xcc, 6f
2799	  nop
28005:
2801	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4m tsbptr
2802	brz,pn	%g1, 3f		/* skip programming if 4m TSB ptr is NULL */
2803	  nop
28046:
2805#ifndef sun4v
2806tsb_kernel_patch_asi:
2807	or	%g0, RUNTIME_PATCH, %g6
2808	mov	%g6, %asi	! XXX avoid writing to %asi !!
2809#endif
2810	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 7)
28113:
2812#ifdef sun4v
2813	cmp	%g5, T_INSTR_MMU_MISS
2814	be,a,pn	%icc, 1f
2815	  mov	%g3, %g5			! trapstat wants TTE in %g5
2816#endif /* sun4v */
2817	cmp	%g5, FAST_IMMU_MISS_TT
2818	be,pn	%icc, 1f
2819	  mov	%g3, %g5			! trapstat wants TTE in %g5
2820	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2821	! trapstat wants TTE in %g5
2822	retry
28231:
2824	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2825	! trapstat wants TTE in %g5
2826	retry
2827
2828tsb_ism:
2829	/*
2830	 * This is an ISM [i|d]tlb miss.  We optimize for largest
2831	 * page size down to smallest.
2832	 *
2833	 * g2 = vaddr + ctx	aka tag access register
2834	 * g3 = ismmap->ism_seg
2835	 * g4 = physical address of ismmap->ism_sfmmu
2836	 * g6 = tsbmiss area
2837	 */
2838	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
2839	brz,a,pn %g7, ptl1_panic		/* if zero jmp ahead */
2840	  mov	PTL1_BAD_ISM, %g1
2841						/* g5 = pa of imap_vb_shift */
2842	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
2843	lduha	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
2844	srlx	%g3, %g4, %g3			/* clr size field */
2845	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
2846	sllx	%g3, %g4, %g3			/* g3 = ism vbase */
2847	and	%g2, %g1, %g4			/* g4 = ctx number */
2848	andn	%g2, %g1, %g1			/* g1 = tlb miss vaddr */
2849	sub	%g1, %g3, %g2			/* g2 = offset in ISM seg */
2850	or	%g2, %g4, %g2			/* g2 = tagacc (vaddr + ctx) */
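	/*
	 * Illustrative sketch, not part of the source, of the address rewrite
	 * above (field names approximate the assym offsets):
	 *
	 *	vbase  = (map->imap_seg >> map->imap_vb_shift) <<
	 *	    map->imap_vb_shift;
	 *	ctx    = tagacc & TAGACC_CTX_MASK;
	 *	vaddr  = tagacc & ~TAGACC_CTX_MASK;
	 *	tagacc = (vaddr - vbase) | ctx;
	 *
	 * so the hash lookups below search the ISM hat with the
	 * segment-relative address.
	 */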
2851
2852	/*
2853	 * ISM pages are always locked down.
2854	 * If we can't find the tte then pagefault
2855	 * and let the spt segment driver resolve it.
2856	 *
2857	 * g2 = ISM vaddr (offset in ISM seg)
2858	 * g6 = tsb miss area
2859	 * g7 = ISM hatid
2860	 */
2861	sub	%g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5 /* g5 = pa of imap_hatflags */
2862	lduha	[%g5]ASI_MEM, %g4		/* g4 = imap_hatflags */
2863	and	%g4, HAT_4M_FLAG, %g5		/* g5 = 4M flag */
2864	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
2865	  nop
2866
2867tsb_ism_32M:
2868	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
2869	brz,pn	%g5, tsb_ism_256M
2870	  nop
2871
2872	/*
2873	 * 32M hash.
2874	 */
2875
2876	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT32M,
2877	    TTE32M, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
2878	    tsb_ism_4M)
2879	/* NOT REACHED */
2880
2881tsb_ism_32M_found:
2882	brlz,pt %g3, tsb_validtte
2883	  nop
2884	ba,pt	%xcc, tsb_ism_4M
2885	  nop
2886
2887tsb_ism_256M:
2888	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
2889	brz,a,pn %g5, ptl1_panic
2890	  mov	PTL1_BAD_ISM, %g1
2891
2892	/*
2893	 * 256M hash.
2894	 */
2895	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT256M,
2896	    TTE256M, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
2897	    tsb_ism_4M)
2898
2899tsb_ism_256M_found:
2900	brlz,pt %g3, tsb_validtte
2901	  nop
2902
2903tsb_ism_4M:
2904	/*
2905	 * 4M hash.
2906	 */
2907	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT4M,
2908	    TTE4M, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
2909	    tsb_ism_8K)
2910	/* NOT REACHED */
2911
2912tsb_ism_4M_found:
2913	brlz,pt %g3, tsb_validtte
2914	  nop
2915
2916tsb_ism_8K:
2917	/*
2918	 * 8K and 64K hash.
2919	 */
2920
2921	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT64K,
2922	    TTE64K, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
2923	    tsb_pagefault)
2924	/* NOT REACHED */
2925
2926tsb_ism_8K_found:
2927	brlz,pt	%g3, tsb_validtte
2928	  nop
2929
2930tsb_pagefault:
2931	rdpr	%tt, %g7
2932	cmp	%g7, FAST_PROT_TT
2933	be,a,pn	%icc, tsb_protfault
2934	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
2935
2936tsb_protfault:
2937	/*
2938	 * we get here if we couldn't find a valid tte in the hash.
2939	 *
2940	 * If user and we are at tl>1 we go to window handling code.
2941	 *
2942	 * If kernel and the fault is on the same page as our stack
2943	 * pointer, then we know the stack is bad and the trap handler
2944	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
2945	 *
2946	 * If this is a kernel trap and tl>1, panic.
2947	 *
2948	 * Otherwise we call pagefault.
2949	 */
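	/*
	 * Illustrative sketch, not part of the source, ignoring the DTrace
	 * NOFAULT handling below:
	 *
	 *	if (ctx == 0) {				(kernel fault)
	 *		if (btop(fault_va) == btop(%sp + STACK_BIAS))
	 *			ptl1_panic(PTL1_BAD_STACK);
	 *		if (tl > 1)
	 *			ptl1_panic(prot ? PTL1_BAD_KPROT_FAULT :
	 *			    PTL1_BAD_KMISS);
	 *		sfmmu_pagefault();
	 *	} else {				(user fault)
	 *		if (tl > 1)
	 *			sfmmu_window_trap();
	 *		sfmmu_pagefault();
	 *	}
	 */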
2950	cmp	%g7, FAST_IMMU_MISS_TT
2951#ifdef sun4v
2952	MMU_FAULT_STATUS_AREA(%g4)
2953	ldx	[%g4 + MMFSA_I_CTX], %g5
2954	ldx	[%g4 + MMFSA_D_CTX], %g4
2955	move	%icc, %g5, %g4
2956	cmp	%g7, T_INSTR_MMU_MISS
2957	move	%icc, %g5, %g4
2958#else
2959	mov	MMU_TAG_ACCESS, %g4
2960	ldxa	[%g4]ASI_DMMU, %g2
2961	ldxa	[%g4]ASI_IMMU, %g5
2962	move	%icc, %g5, %g2
2963	cmp	%g7, T_INSTR_MMU_MISS
2964	move	%icc, %g5, %g2
2965	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
2966#endif
2967	brnz,pn	%g4, 3f				/* skip if not kernel */
2968	  rdpr	%tl, %g5
2969
2970	add	%sp, STACK_BIAS, %g3
2971	srlx	%g3, MMU_PAGESHIFT, %g3
2972	srlx	%g2, MMU_PAGESHIFT, %g4
2973	cmp	%g3, %g4
2974	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
2975	  mov	PTL1_BAD_STACK, %g1
2976
2977	cmp	%g5, 1
2978	ble,pt	%icc, 2f
2979	  nop
2980	TSTAT_CHECK_TL1(2f, %g1, %g2)
2981	rdpr	%tt, %g2
2982	cmp	%g2, FAST_PROT_TT
2983	mov	PTL1_BAD_KPROT_FAULT, %g1
2984	movne	%icc, PTL1_BAD_KMISS, %g1
2985	ba,pt	%icc, ptl1_panic
2986	  nop
2987
29882:
2989	/*
2990	 * We are taking a pagefault in the kernel on a kernel address.  If
2991	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
2992	 * want to call sfmmu_pagefault -- we will instead note that a fault
2993	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
2994	 * (instead of a "retry").  This will step over the faulting
2995	 * instruction.
2996	 */
2997	CPU_INDEX(%g1, %g2)
2998	set	cpu_core, %g2
2999	sllx	%g1, CPU_CORE_SHIFT, %g1
3000	add	%g1, %g2, %g1
3001	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3002	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3003	bz	sfmmu_pagefault
3004	or	%g2, CPU_DTRACE_BADADDR, %g2
3005	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3006	GET_MMU_D_ADDR(%g3, %g4)
3007	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3008	done
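	/*
	 * Illustrative C sketch, not part of the source, of the NOFAULT
	 * protocol implemented above:
	 *
	 *	uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	 *	if (*flags & CPU_DTRACE_NOFAULT) {
	 *		*flags |= CPU_DTRACE_BADADDR;
	 *		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = fault_addr;
	 *		done;		(step over the faulting instruction)
	 *	} else {
	 *		sfmmu_pagefault();
	 *	}
	 */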
3009
30103:
3011	cmp	%g5, 1
3012	ble,pt	%icc, 4f
3013	  nop
3014	TSTAT_CHECK_TL1(4f, %g1, %g2)
3015	ba,pt	%icc, sfmmu_window_trap
3016	  nop
3017
30184:
3019	/*
3020	 * We are taking a pagefault on a non-kernel address.  If we are in
3021	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3022	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3023	 */
3024	CPU_INDEX(%g1, %g2)
3025	set	cpu_core, %g2
3026	sllx	%g1, CPU_CORE_SHIFT, %g1
3027	add	%g1, %g2, %g1
3028	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3029	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3030	bz	sfmmu_pagefault
3031	or	%g2, CPU_DTRACE_BADADDR, %g2
3032	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3033	GET_MMU_D_ADDR(%g3, %g4)
3034	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3035
3036	/*
3037	 * Be sure that we're actually taking this miss from the kernel --
3038	 * otherwise we have managed to return to user-level with
3039	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3040	 */
3041	rdpr	%tstate, %g2
3042	btst	TSTATE_PRIV, %g2
3043	bz,a	ptl1_panic
3044	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3045	done
3046
3047	ALTENTRY(tsb_tl0_noctxt)
3048	/*
3049	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
3050	 * if it is, indicate that we have faulted and issue a done.
3051	 */
3052	CPU_INDEX(%g5, %g6)
3053	set	cpu_core, %g6
3054	sllx	%g5, CPU_CORE_SHIFT, %g5
3055	add	%g5, %g6, %g5
3056	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
3057	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
3058	bz	1f
3059	or	%g6, CPU_DTRACE_BADADDR, %g6
3060	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
3061	GET_MMU_D_ADDR(%g3, %g4)
3062	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
3063
3064	/*
3065	 * Be sure that we're actually taking this miss from the kernel --
3066	 * otherwise we have managed to return to user-level with
3067	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3068	 */
3069	rdpr	%tstate, %g5
3070	btst	TSTATE_PRIV, %g5
3071	bz,a	ptl1_panic
3072	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3073	done
3074
30751:
3076	rdpr	%tt, %g5
3077	cmp	%g5, FAST_IMMU_MISS_TT
3078#ifdef sun4v
3079	MMU_FAULT_STATUS_AREA(%g2)
3080	be,a,pt	%icc, 2f
3081	  ldx	[%g2 + MMFSA_I_CTX], %g3
3082	cmp	%g5, T_INSTR_MMU_MISS
3083	be,a,pt	%icc, 2f
3084	  ldx	[%g2 + MMFSA_I_CTX], %g3
3085	ldx	[%g2 + MMFSA_D_CTX], %g3
30862:
3087#else
3088	mov	MMU_TAG_ACCESS, %g2
3089	be,a,pt	%icc, 2f
3090	  ldxa	[%g2]ASI_IMMU, %g3
3091	ldxa	[%g2]ASI_DMMU, %g3
30922:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
3093#endif
3094	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
3095	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
3096	rdpr	%tl, %g5
3097	cmp	%g5, 1
3098	ble,pt	%icc, sfmmu_mmu_trap
3099	  nop
3100	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3101	ba,pt	%icc, sfmmu_window_trap
3102	  nop
3103	SET_SIZE(sfmmu_tsb_miss)
3104
3105#if (1<< TSBMISS_SHIFT) != TSBMISS_SIZE
3106#error - TSBMISS_SHIFT does not correspond to size of tsbmiss struct
3107#endif
3108
3109#endif /* lint */
3110
3111#if defined (lint)
3112/*
3113 * This routine will look for a user or kernel vaddr in the hash
3114 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
3115 * grab any locks.  It should only be used by other sfmmu routines.
3116 */
3117/* ARGSUSED */
3118pfn_t
3119sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3120{
3121	return(0);
3122}
3123
3124#else /* lint */
3125
3126	ENTRY_NP(sfmmu_vatopfn)
3127 	/*
3128 	 * save the current %pstate; interrupts are disabled below
3129 	 */
3130 	rdpr	%pstate, %o3
3131#ifdef DEBUG
3132	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
3133	bnz,pt	%icc, 1f			/* disabled, panic	 */
3134	  nop
3135
3136	sethi	%hi(panicstr), %g1
3137	ldx	[%g1 + %lo(panicstr)], %g1
3138	tst	%g1
3139	bnz,pt	%icc, 1f
3140	  nop
3141
3142	save	%sp, -SA(MINFRAME), %sp
3143	sethi	%hi(sfmmu_panic1), %o0
3144	call	panic
3145	 or	%o0, %lo(sfmmu_panic1), %o0
31461:
3147#endif
3148	/*
3149	 * disable interrupts to protect the TSBMISS area
3150	 */
3151	andn    %o3, PSTATE_IE, %o5
3152	wrpr    %o5, 0, %pstate
3153
3154	/*
3155	 * o0 = vaddr
3156	 * o1 = sfmmup
3157	 * o2 = ttep
3158	 */
3159	CPU_TSBMISS_AREA(%g1, %o5)
3160	ldn	[%g1 + TSBMISS_KHATID], %o4
3161	cmp	%o4, %o1
3162	bne,pn	%ncc, vatopfn_nokernel
3163	  mov	TTE64K, %g5			/* g5 = rehash # */
3164	mov	%g1, %o5			/* o5 = tsbmiss_area */
3165	/*
3166	 * o0 = vaddr
3167	 * o1 & o4 = hatid
3168	 * o2 = ttep
3169	 * o5 = tsbmiss area
3170	 */
3171	mov	HBLK_RANGE_SHIFT, %g6
31721:
3173
3174	/*
3175	 * o0 = vaddr
3176	 * o1 = sfmmup
3177	 * o2 = ttep
3178	 * o3 = old %pstate
3179	 * o4 = hatid
3180	 * o5 = tsbmiss
3181	 * g5 = rehash #
3182	 * g6 = hmeshift
3183	 *
3184	 * The first arg to GET_TTE is actually tagaccess register
3185	 * not just vaddr. Since this call is for kernel we need to clear
3186	 * any lower vaddr bits that would be interpreted as ctx bits.
3187	 */
3188	set     TAGACC_CTX_MASK, %g1
3189	andn    %o0, %g1, %o0
3190	GET_TTE(%o0, %o4, %g1, %g2, %g3, %o5, %g4, %g6, %g5,
3191		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
3192
3193kvtop_hblk_found:
3194	/*
3195	 * o0 = vaddr
3196	 * o1 = sfmmup
3197	 * o2 = ttep
3198	 * g1 = tte
3199	 * g2 = tte pa
3200	 * g3 = tte va
3201	 * o2 = tsbmiss area
3202	 * o1 = hat id
3203	 */
3204	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
3205	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3206	stx	%g1, [%o2]			/* put tte into *ttep */
3207	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
3208	/*
3209	 * o0 = vaddr
3210	 * o1 = sfmmup
3211	 * o2 = ttep
3212	 * g1 = pfn
3213	 */
3214	ba,pt	%xcc, 6f
3215	  mov	%g1, %o0
3216
3217kvtop_nohblk:
3218	/*
3219	 * We get here if we couldn't find a valid hblk in the hash.  We rehash
3220	 * if necessary.
3221	 */
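	/*
	 * Illustrative sketch, not part of the source, of the rehash loop
	 * driven here (sun4u stops at DEFAULT_MAX_HASHCNT, sun4v at
	 * MAX_HASHCNT):
	 *
	 *	for (hashno = TTE64K; hashno <= maxhashcnt; hashno = next) {
	 *		if (GET_TTE(vaddr, khatid, hmeshift(hashno), hashno)
	 *		    finds a valid tte)
	 *			return (TTE_TO_PFN(vaddr, &tte));
	 *	}
	 *	return (PFN_INVALID);
	 *
	 * where the progression is 64K/8K -> 512K -> 4M on sun4u and
	 * 64K/8K -> 4M -> 256M on sun4v.
	 */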
3222	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
3223#ifdef sun4v
3224	cmp	%g5, MAX_HASHCNT
3225#else
3226	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
3227#endif
3228	be,a,pn	%icc, 6f
3229	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3230	mov	%o1, %o4			/* restore hatid */
3231#ifdef sun4v
3232        add	%g5, 2, %g5
3233	cmp	%g5, 3
3234	move	%icc, MMU_PAGESHIFT4M, %g6
3235	ba,pt	%icc, 1b
3236	movne	%icc, MMU_PAGESHIFT256M, %g6
3237#else
3238        inc	%g5
3239	cmp	%g5, 2
3240	move	%icc, MMU_PAGESHIFT512K, %g6
3241	ba,pt	%icc, 1b
3242	movne	%icc, MMU_PAGESHIFT4M, %g6
3243#endif
32446:
3245	retl
3246 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3247
3248tsb_suspend:
3249	/*
3250	 * o0 = vaddr
3251	 * o1 = sfmmup
3252	 * o2 = ttep
3253	 * g1 = tte
3254	 * g2 = tte pa
3255	 * g3 = tte va
3256	 * o2 = tsbmiss area  use o5 instead of o2 for tsbmiss
3257	 */
3258	stx	%g1, [%o2]			/* put tte into *ttep */
3259	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
3260	  sub	%g0, 1, %o0			/* output = -1 (PFN_INVALID) */
3261	TTETOPFN(%g1, %o0, vatopfn_l3, %g2, %g3, %g4)
3262	/*
3263	 * o0 = PFN return value PFN_INVALID, PFN_SUSPENDED, or pfn#
3264	 * o1 = sfmmup
3265	 * o2 = ttep
3266	 * g1 = pfn
3267	 */
3268	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
32698:
3270	retl
3271	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
3272
3273vatopfn_nokernel:
3274	/*
3275	 * This routine does NOT support user addresses
3276	 * There is a routine in C that supports this.
3277	 * The only reason why we don't have the C routine
3278	 * support kernel addresses as well is because
3279	 * we do va_to_pa while holding the hashlock.
3280	 */
3281 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3282	save	%sp, -SA(MINFRAME), %sp
3283	sethi	%hi(sfmmu_panic3), %o0
3284	call	panic
3285	 or	%o0, %lo(sfmmu_panic3), %o0
3286
3287	SET_SIZE(sfmmu_vatopfn)
3288#endif /* lint */
3289
3290
3291
3292#if !defined(lint)
3293
3294/*
3295 * kpm lock used between trap level tsbmiss handler and kpm C level.
3296 */
3297#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
3298	mov     0xff, tmp1						;\
3299label1:									;\
3300	casa    [kpmlckp]asi, %g0, tmp1					;\
3301	brnz,pn tmp1, label1						;\
3302	mov     0xff, tmp1						;\
3303	membar  #LoadLoad
3304
3305#define KPMLOCK_EXIT(kpmlckp, asi)					\
3306	membar  #LoadStore|#StoreStore					;\
3307	sta     %g0, [kpmlckp]asi
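/*
 * Illustrative C equivalent, not part of the build, assuming a word-sized
 * lock reached through the given ASI (casa/membar denote the corresponding
 * SPARC instructions):
 *
 *	void
 *	kpmlock_enter(volatile uint32_t *lp)
 *	{
 *		while (casa(lp, 0, 0xff) != 0)	(spin until we swap in 0xff)
 *			;
 *		membar(#LoadLoad);
 *	}
 *
 *	void
 *	kpmlock_exit(volatile uint32_t *lp)
 *	{
 *		membar(#LoadStore | #StoreStore);
 *		*lp = 0;
 *	}
 */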
3308
3309/*
3310 * Lookup a memseg for a given pfn and if found, return the physical
3311 * address of the corresponding struct memseg in mseg, otherwise
3312 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
3313 * tsbmp, %asi is assumed to be ASI_MEM.
3314 * This lookup is done by strictly traversing only the physical memseg
3315 * linkage. The more generic approach, to check the virtual linkage
3316 * before using the physical (used e.g. with hmehash buckets), cannot
3317 * be used here: memory DR operations can run in parallel to this
3318 * lookup without any locks, and updates of the physical and virtual
3319 * linkage cannot be done atomically with respect to each other.  Because
3320 * physical address zero can be a valid physical address, MSEG_NULLPTR_PA
3321 * acts as the "physical NULL" pointer.
3322 */
3323#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
3324	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
3325	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
3326	udivx	pfn, mseg, mseg						;\
3327	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
3328	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
3329	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
3330	add	tmp1, mseg, tmp1					;\
3331	ldxa	[tmp1]%asi, mseg					;\
3332	cmp	mseg, MSEG_NULLPTR_PA					;\
3333	be,pn	%xcc, label/**/1		/* if not found */	;\
3334	  nop								;\
3335	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3336	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3337	blu,pn	%xcc, label/**/1					;\
3338	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3339	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3340	bgeu,pn	%xcc, label/**/1					;\
3341	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
3342	mulx	tmp1, PAGE_SIZE, tmp1					;\
3343	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
3344	add	tmp2, tmp1, tmp1			/* pp */	;\
3345	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
3346	cmp	tmp2, pfn						;\
3347	be,pt	%xcc, label/**/_ok			/* found */	;\
3348label/**/1:								;\
3349	/* brute force lookup */					;\
3350	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
3351	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
3352label/**/2:								;\
3353	cmp	mseg, MSEG_NULLPTR_PA					;\
3354	be,pn	%xcc, label/**/_ok		/* if not found */	;\
3355	  nop								;\
3356	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3357	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3358	blu,a,pt %xcc, label/**/2					;\
3359	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3360	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3361	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3362	bgeu,a,pt %xcc, label/**/2					;\
3363	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3364label/**/_ok:
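/*
 * Illustrative sketch, not part of the build, with approximate field names;
 * page_of() stands for indexing the page array reached via MEMSEG_PAGESPA,
 * and all loads are physical (%asi == ASI_MEM):
 *
 *	mseg = mseg_phash[(pfn / mhash_per_slot) & (SFMMU_N_MEM_SLOTS - 1)];
 *	if (mseg == MSEG_NULLPTR_PA ||
 *	    pfn < mseg->pages_base || pfn >= mseg->pages_end ||
 *	    page_of(mseg, pfn - mseg->pages_base)->p_pagenum != pfn) {
 *		(hash miss or stale entry: brute force walk)
 *		for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA;
 *		    mseg = mseg->nextpa)
 *			if (pfn >= mseg->pages_base && pfn < mseg->pages_end)
 *				break;
 *	}
 *	(result is mseg, or MSEG_NULLPTR_PA if no memseg contains pfn)
 */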
3365
3366	/*
3367	 * kpm tsb miss handler large pages
3368	 * g1 = 8K kpm TSB entry pointer
3369	 * g2 = tag access register
3370	 * g3 = 4M kpm TSB entry pointer
3371	 */
3372	ALTENTRY(sfmmu_kpm_dtsb_miss)
3373	TT_TRACE(trace_tsbmiss)
3374
3375	CPU_INDEX(%g7, %g6)
3376	sethi	%hi(kpmtsbm_area), %g6
3377	sllx	%g7, KPMTSBM_SHIFT, %g7
3378	or	%g6, %lo(kpmtsbm_area), %g6
3379	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
3380
3381	/* check enable flag */
3382	ldub	[%g6 + KPMTSBM_FLAGS], %g4
3383	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
3384	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
3385	  nop
3386
3387	/* VA range check */
3388	ldx	[%g6 + KPMTSBM_VBASE], %g7
3389	cmp	%g2, %g7
3390	blu,pn	%xcc, sfmmu_tsb_miss
3391	  ldx	[%g6 + KPMTSBM_VEND], %g5
3392	cmp	%g2, %g5
3393	bgeu,pn	%xcc, sfmmu_tsb_miss
3394	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
3395
3396	/*
3397	 * check TL tsbmiss handling flag
3398	 * bump tsbmiss counter
3399	 */
3400	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
3401#ifdef	DEBUG
3402	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
3403	inc	%g5
3404	brz,pn	%g3, sfmmu_kpm_exception
3405	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
3406#else
3407	inc	%g5
3408	st	%g5, [%g6 + KPMTSBM_TSBMISS]
3409#endif
3410	/*
3411	 * At this point:
3412	 *  g1 = 8K kpm TSB pointer (not used)
3413	 *  g2 = tag access register
3414	 *  g3 = clobbered
3415	 *  g6 = per-CPU kpm tsbmiss area
3416	 *  g7 = kpm_vbase
3417	 */
3418
3419	/* vaddr2pfn */
3420	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
3421	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
3422	srax    %g4, %g3, %g2			/* which alias range (r) */
3423	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
3424	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
3425
3426	/*
3427	 * Setup %asi
3428	 * mseg_pa = page_numtomemseg_nolock(pfn)
3429	 * if (mseg_pa == NULL) sfmmu_kpm_exception
3430	 * g2=pfn
3431	 */
3432	mov	ASI_MEM, %asi
3433	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
3434	cmp	%g3, MSEG_NULLPTR_PA
3435	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
3436	  nop
3437
3438	/*
3439	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
3440	 * g2=pfn g3=mseg_pa
3441	 */
3442	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
3443	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
3444	srlx	%g2, %g5, %g4
3445	sllx	%g4, %g5, %g4
3446	sub	%g4, %g7, %g4
3447	srlx	%g4, %g5, %g4
3448
3449	/*
3450	 * Validate inx value
3451	 * g2=pfn g3=mseg_pa g4=inx
3452	 */
3453#ifdef	DEBUG
3454	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
3455	cmp	%g4, %g5			/* inx - nkpmpgs */
3456	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
3457	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3458#else
3459	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3460#endif
3461	/*
3462	 * kp = &mseg_pa->kpm_pages[inx]
3463	 */
3464	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
3465	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
3466	add	%g5, %g4, %g5			/* kp */
3467
3468	/*
3469	 * KPMP_HASH(kp)
3470	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
3471	 */
3472	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
3473	sub	%g7, 1, %g7			/* mask */
3474	srlx	%g5, %g1, %g1			/* x = ksp >> kpmp_shift */
3475	add	%g5, %g1, %g5			/* y = ksp + x */
3476	and 	%g5, %g7, %g5			/* hashinx = y & mask */
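	/*
	 * Illustrative sketch, not part of the source:
	 *	hashinx = (kp + (kp >> kpmp_shift)) & (kpmp_table_sz - 1);
	 */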
3477
3478	/*
3479	 * Calculate physical kpm_page pointer
3480	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
3481	 */
3482	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
3483	add	%g1, %g4, %g1			/* kp_pa */
3484
3485	/*
3486	 * Calculate physical hash lock address
3487	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
3488	 */
3489	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
3490	sllx	%g5, KPMHLK_SHIFT, %g5
3491	add	%g4, %g5, %g3
3492	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
3493
3494	/*
3495	 * Assemble tte
3496	 * g1=kp_pa g2=pfn g3=hlck_pa
3497	 */
3498#ifdef sun4v
3499	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
3500	sllx	%g5, 32, %g5
3501	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
3502	or	%g4, TTE4M, %g4
3503	or	%g5, %g4, %g5
3504#else
3505	sethi	%hi(TTE_VALID_INT), %g4
3506	mov	TTE4M, %g5
3507	sllx	%g5, TTE_SZ_SHFT_INT, %g5
3508	or	%g5, %g4, %g5			/* upper part */
3509	sllx	%g5, 32, %g5
3510	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
3511	or	%g5, %g4, %g5
3512#endif
3513	sllx	%g2, MMU_PAGESHIFT, %g4
3514	or	%g5, %g4, %g5			/* tte */
3515	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
3516	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3517
3518	/*
3519	 * tsb dropin
3520	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
3521	 */
3522
3523	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
3524	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
3525
3526	/* use C-handler if there's no go for dropin */
3527	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
3528	cmp	%g7, -1
3529	bne,pn	%xcc, 5f	/* use C-handler if there's no go for dropin */
3530	  nop
3531
3532#ifdef	DEBUG
3533	/* double check refcnt */
3534	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
3535	brz,pn	%g7, 5f			/* let C-handler deal with this */
3536	  nop
3537#endif
3538
3539#ifndef sun4v
3540	ldub	[%g6 + KPMTSBM_FLAGS], %g7
3541	mov	ASI_N, %g1
3542	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
3543	movnz	%icc, ASI_MEM, %g1
3544	mov	%g1, %asi
3545#endif
3546
3547	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
3548	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
3549
3550	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
3551	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
3552
3553	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
3554
3555	/* KPMLOCK_EXIT(kpmlckp, asi) */
3556	KPMLOCK_EXIT(%g3, ASI_MEM)
3557
3558	/*
3559	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3560	 * point to trapstat's TSB miss return code (note that trapstat
3561	 * itself will patch the correct offset to add).
3562	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
3563	 */
3564	rdpr	%tl, %g7
3565	cmp	%g7, 1
3566	ble	%icc, 0f
3567	sethi	%hi(KERNELBASE), %g6
3568	rdpr	%tpc, %g7
3569	or	%g6, %lo(KERNELBASE), %g6
3570	cmp	%g7, %g6
3571	bgeu	%xcc, 0f
3572	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
3573	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3574	wrpr	%g7, %tpc
3575	add	%g7, 4, %g7
3576	wrpr	%g7, %tnpc
35770:
3578	retry
35795:
3580	/* g3=hlck_pa */
3581	KPMLOCK_EXIT(%g3, ASI_MEM)
3582	ba,pt	%icc, sfmmu_kpm_exception
3583	  nop
3584	SET_SIZE(sfmmu_kpm_dtsb_miss)
3585
3586	/*
3587	 * kpm tsbmiss handler for smallpages
3588	 * g1 = 8K kpm TSB pointer
3589	 * g2 = tag access register
3590	 * g3 = 4M kpm TSB pointer
3591	 */
3592	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
3593	TT_TRACE(trace_tsbmiss)
3594	CPU_INDEX(%g7, %g6)
3595	sethi	%hi(kpmtsbm_area), %g6
3596	sllx	%g7, KPMTSBM_SHIFT, %g7
3597	or	%g6, %lo(kpmtsbm_area), %g6
3598	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
3599
3600	/* check enable flag */
3601	ldub	[%g6 + KPMTSBM_FLAGS], %g4
3602	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
3603	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
3604	  nop
3605
3606	/*
3607	 * VA range check
3608	 * On fail: goto sfmmu_tsb_miss
3609	 */
3610	ldx	[%g6 + KPMTSBM_VBASE], %g7
3611	cmp	%g2, %g7
3612	blu,pn	%xcc, sfmmu_tsb_miss
3613	  ldx	[%g6 + KPMTSBM_VEND], %g5
3614	cmp	%g2, %g5
3615	bgeu,pn	%xcc, sfmmu_tsb_miss
3616	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
3617
3618	/*
3619	 * check TL tsbmiss handling flag
3620	 * bump tsbmiss counter
3621	 */
3622	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
3623#ifdef	DEBUG
3624	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
3625	inc	%g5
3626	brz,pn	%g1, sfmmu_kpm_exception
3627	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
3628#else
3629	inc	%g5
3630	st	%g5, [%g6 + KPMTSBM_TSBMISS]
3631#endif
3632	/*
3633	 * At this point:
3634	 *  g1 = clobbered
3635	 *  g2 = tag access register
3636	 *  g3 = 4M kpm TSB pointer (not used)
3637	 *  g6 = per-CPU kpm tsbmiss area
3638	 *  g7 = kpm_vbase
3639	 */
3640
3641	/* vaddr2pfn */
3642	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
3643	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
3644	srax    %g4, %g3, %g2			/* which alias range (r) */
3645	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
3646	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
3647
3648	/*
3649	 * Setup %asi
3650	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
3651	 * if (mseg not found) sfmmu_kpm_exception
3652	 * g2=pfn
3653	 */
3654	mov	ASI_MEM, %asi
3655	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
3656	cmp	%g3, MSEG_NULLPTR_PA
3657	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
3658	  nop
3659
3660	/*
3661	 * inx = pfn - mseg_pa->kpm_pbase
3662	 * g2=pfn g3=mseg_pa
3663	 */
3664	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
3665	sub	%g2, %g7, %g4
3666
3667#ifdef	DEBUG
3668	/*
3669	 * Validate inx value
3670	 * g2=pfn g3=mseg_pa g4=inx
3671	 */
3672	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
3673	cmp	%g4, %g5			/* inx - nkpmpgs */
3674	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
3675	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3676#else
3677	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3678#endif
3679	/* ksp = &mseg_pa->kpm_spages[inx] */
3680	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
3681	add	%g5, %g4, %g5			/* ksp */
3682
3683	/*
3684	 * KPMP_SHASH(kp)
3685	 * g2=pfn g3=mseg_pa g4=inx g5=ksp g7=kpmp_stable_sz
3686	 */
3687	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
3688	sub	%g7, 1, %g7			/* mask */
3689	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
3690	add	%g5, %g1, %g5			/* y = ksp + x */
3691	and 	%g5, %g7, %g5			/* hashinx = y & mask */
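	/*
	 * I.e. (sketch, using the register annotations above):
	 *
	 *	hashinx = ((ksp << kpmp_shift) + ksp) & (kpmp_stable_sz - 1);
	 */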
3692
3693	/*
3694	 * Calculate physical kpm_spage pointer
3695	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
3696	 */
3697	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
3698	add	%g1, %g4, %g1			/* ksp_pa */
3699
3700	/*
3701	 * Calculate physical hash lock address.
3702	 * Note: Changes in kpm_shlk_t must be reflected here.
3703	 * g1=ksp_pa g2=pfn g5=hashinx
3704	 */
3705	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
3706	sllx	%g5, KPMSHLK_SHIFT, %g5
3707	add	%g4, %g5, %g3			/* hlck_pa */
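	/*
	 * Sketch: hlck_pa = kpmp_stablepa + (hashinx << KPMSHLK_SHIFT),
	 * i.e. the physical address of the kpm_shlk_t entry selected by
	 * hashinx.
	 */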
3708
3709	/*
3710	 * Assemble tte
3711	 * g1=ksp_pa g2=pfn g3=hlck_pa
3712	 */
3713	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
3714	sllx	%g5, 32, %g5
3715	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
3716	or	%g5, %g4, %g5
3717	sllx	%g2, MMU_PAGESHIFT, %g4
3718	or	%g5, %g4, %g5			/* tte */
3719	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
3720	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
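	/*
	 * The TTE assembled above corresponds roughly to the following
	 * sketch (assumes TTE_VALID_INT is the upper-word valid bit, as
	 * the sethi/sllx pair above implies):
	 *
	 *	tte = ((uint64_t)TTE_VALID_INT << 32) |
	 *	    TTE_CP_INT | TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
	 *	    ((uint64_t)pfn << MMU_PAGESHIFT);
	 */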
3721
3722	/*
3723	 * tsb dropin
3724	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte
3725	 */
3726
3727	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
3728	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
3729
3730	/* use the C-handler if there is no "go" indication for dropin */
3731	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
3732	cmp	%g7, -1
3733	bne,pn	%xcc, 5f
3734	  nop
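	/*
	 * Sketch of the gate above: kp_mapped == -1 is the "go"
	 * indication; anything else falls back to the C handler at 5f,
	 * which releases the lock first.
	 *
	 *	if (ksp->kp_mapped != -1)
	 *		goto 5f;
	 */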
3735
3736#ifndef sun4v
3737	ldub	[%g6 + KPMTSBM_FLAGS], %g7
3738	mov	ASI_N, %g1
3739	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
3740	movnz	%icc, ASI_MEM, %g1
3741	mov	%g1, %asi
3742#endif
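	/*
	 * Sketch of the non-sun4v %asi selection above:
	 *
	 *	asi = (flags & KPMTSBM_TSBPHYS_FLAG) ? ASI_MEM : ASI_N;
	 */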
3743
3744	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
3745	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
3746
3747	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
3748	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
3749
3750	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
3751
3752	/* KPMLOCK_EXIT(kpmlckp, asi) */
3753	KPMLOCK_EXIT(%g3, ASI_MEM)
3754
3755	/*
3756	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3757	 * point to trapstat's TSB miss return code (note that trapstat
3758	 * itself will patch the correct offset to add).
3759	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
3760	 */
3761	rdpr	%tl, %g7
3762	cmp	%g7, 1
3763	ble	%icc, 0f
3764	sethi	%hi(KERNELBASE), %g6
3765	rdpr	%tpc, %g7
3766	or	%g6, %lo(KERNELBASE), %g6
3767	cmp	%g7, %g6
3768	bgeu	%xcc, 0f
3769	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
3770	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3771	wrpr	%g7, %tpc
3772	add	%g7, 4, %g7
3773	wrpr	%g7, %tnpc
37740:
3775	retry
37765:
3777	/* g3=hlck_pa */
3778	KPMLOCK_EXIT(%g3, ASI_MEM)
3779	ba,pt	%icc, sfmmu_kpm_exception
3780	  nop
3781	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
3782
3783#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
3784#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
3785#endif
3786
3787#endif /* lint */
3788
3789#ifdef	lint
3790/*
3791 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
3792 * Called from C-level, sets/clears "go" indication for trap level handler.
3793 * khl_lock is a low-level spin lock that protects the kp_tsbmtl field.
3794 * It is assumed that &kp->kp_refcntc is checked for zero or -1 at C-level
3795 * and that khl_mutex is held when called from C-level.
3796 */
3797/* ARGSUSED */
3798void
3799sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
3800{
3801}
3802
3803/*
3804 * kpm_smallpages: stores val to the byte at address mapped, within
3805 * low-level lock brackets. The old value is returned.
3806 * Called from C-level.
3807 */
3808/* ARGSUSED */
3809int
3810sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
3811{
3812	return (0);
3813}
3814
3815#else /* lint */
3816
3817	.seg	".data"
3818sfmmu_kpm_tsbmtl_panic:
3819	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
3820	.byte	0
3821sfmmu_kpm_stsbmtl_panic:
3822	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
3823	.byte	0
3824	.align	4
3825	.seg	".text"
3826
3827	ENTRY_NP(sfmmu_kpm_tsbmtl)
3828	rdpr	%pstate, %o3
3829	/*
3830	 * %o0 = &kp_refcntc
3831	 * %o1 = &khl_lock
3832	 * %o2 = 0/1 (off/on)
3833	 * %o3 = pstate save
3834	 */
3835#ifdef DEBUG
3836	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
3837	bnz,pt %icc, 1f				/* disabled, panic	 */
3838	  nop
3839	save	%sp, -SA(MINFRAME), %sp
3840	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
3841	call	panic
3842	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
3843	ret
3844	restore
38451:
3846#endif /* DEBUG */
3847	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
3848
3849	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
3850	mov	-1, %o5
3851	brz,a	%o2, 2f
3852	  mov	0, %o5
38532:
3854	sth	%o5, [%o0]
3855	KPMLOCK_EXIT(%o1, ASI_N)
3856
3857	retl
3858	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
3859	SET_SIZE(sfmmu_kpm_tsbmtl)
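/*
 * For reference, a hedged C-level sketch of sfmmu_kpm_tsbmtl above
 * (illustrative helper names only; the real work is the short store
 * done under the kpm spin lock with interrupts disabled):
 *
 *	pstate = disable_interrupts();
 *	kpmlock_enter(khl_lock);
 *	*kp_refcntc = (cmd != 0) ? -1 : 0;
 *	kpmlock_exit(khl_lock);
 *	restore_interrupts(pstate);
 */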
3860
3861	ENTRY_NP(sfmmu_kpm_stsbmtl)
3862	rdpr	%pstate, %o3
3863	/*
3864	 * %o0 = &mapped
3865	 * %o1 = &kshl_lock
3866	 * %o2 = val
3867	 * %o3 = pstate save
3868	 */
3869#ifdef DEBUG
3870	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
3871	bnz,pt %icc, 1f				/* disabled, panic	 */
3872	  nop
3873	save	%sp, -SA(MINFRAME), %sp
3874	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
3875	call	panic
3876	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
3877	ret
3878	restore
38791:
3880#endif /* DEBUG */
3881	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
3882
3883	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
3884	ldsb	[%o0], %o5
3885	stb	%o2, [%o0]
3886	KPMLOCK_EXIT(%o1, ASI_N)
3887
3888	mov	%o5, %o0			/* return old val */
3889	retl
3890	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
3891	SET_SIZE(sfmmu_kpm_stsbmtl)
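/*
 * Similarly, a hedged C-level sketch of sfmmu_kpm_stsbmtl above
 * (illustrative helper names only):
 *
 *	pstate = disable_interrupts();
 *	kpmlock_enter(kshl_lock);
 *	prev = *mapped;
 *	*mapped = val;
 *	kpmlock_exit(kshl_lock);
 *	restore_interrupts(pstate);
 *	return (prev);
 */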
3892
3893#endif /* lint */
3894
3895#ifndef lint
3896#ifdef sun4v
3897	/*
3898	 * User/kernel data miss w/ multiple TSBs
3899	 * The first probe covers 8K, 64K, and 512K page sizes,
3900	 * because 64K and 512K mappings are replicated off the 8K
3901	 * pointer.  Second probe covers 4M page size only.
3902	 *
3903	 * MMU fault area contains miss address and context.
3904	 */
3905	ALTENTRY(sfmmu_slow_dmmu_miss)
3906	GET_MMU_D_TAGACC_CTX(%g2, %g3)	! %g2 = tagacc, %g3 = ctx
3907
3908slow_miss_common:
3909	/*
3910	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
3911	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
3912	 */
3913	brnz,pt	%g3, 8f			! check for user context
3914	  nop
3915
3916	/*
3917	 * Kernel miss
3918	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
3919	 * branch to sfmmu_tsb_miss_tt to handle it.
3920	 */
3921	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
3922sfmmu_dslow_patch_ktsb_base:
3923	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
3924sfmmu_dslow_patch_ktsb_szcode:
3925	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
3926
3927	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
3928	! %g1 = First TSB entry pointer, as TSB miss handler expects
3929
3930	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
3931sfmmu_dslow_patch_ktsb4m_base:
3932	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
3933sfmmu_dslow_patch_ktsb4m_szcode:
3934	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
3935
3936	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
3937	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
3938	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
3939	.empty
3940
39418:
3942	/*
3943	 * User miss
3944	 * Get first TSB pointer in %g1
3945	 * Get second TSB pointer (or NULL if no second TSB) in %g3
3946	 * Branch to sfmmu_tsb_miss_tt to handle it
3947	 */
3948	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
3949	/* %g1 = first TSB entry ptr now, %g2 preserved */
3950
3951	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
3952	brlz,a,pt %g3, sfmmu_tsb_miss_tt	/* done if no 2nd TSB */
3953	  mov	%g0, %g3
3954
3955	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3956	/* %g3 = second TSB entry ptr now, %g2 preserved */
39579:
3958	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
3959	.empty
3960	SET_SIZE(sfmmu_slow_dmmu_miss)
3961
3962
3963	/*
3964	 * User/kernel instruction miss w/ multiple TSBs
3965	 * The first probe covers 8K, 64K, and 512K page sizes,
3966	 * because 64K and 512K mappings are replicated off the 8K
3967	 * pointer.  Second probe covers 4M page size only.
3968	 *
3969	 * MMU fault area contains miss address and context.
3970	 */
3971	ALTENTRY(sfmmu_slow_immu_miss)
3972	MMU_FAULT_STATUS_AREA(%g2)
3973	ldx	[%g2 + MMFSA_I_CTX], %g3
3974	ldx	[%g2 + MMFSA_I_ADDR], %g2
3975	srlx	%g2, MMU_PAGESHIFT, %g2	! align address to page boundary
3976	sllx	%g2, MMU_PAGESHIFT, %g2
3977	ba,pt	%xcc, slow_miss_common
3978	or	%g2, %g3, %g2
3979	SET_SIZE(sfmmu_slow_immu_miss)
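	/*
	 * The shift pair above synthesizes a tag-access-style value from
	 * the MMU fault status area contents; roughly (sketch):
	 *
	 *	tagacc = (addr & ~(MMU_PAGESIZE - 1)) | ctx;
	 */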
3980
3981#endif /* sun4v */
3982#endif	/* lint */
3983
3984#ifndef lint
3985
3986/*
3987 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
3988 */
3989	.seg	".data"
3990	.align	64
3991	.global tsbmiss_area
3992tsbmiss_area:
3993	.skip	(TSBMISS_SIZE * NCPU)
3994
3995	.align	64
3996	.global kpmtsbm_area
3997kpmtsbm_area:
3998	.skip	(KPMTSBM_SIZE * NCPU)
3999#endif	/* lint */
4000