1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident	"%Z%%M%	%I%	%E% SMI"
28
29/*
30 * SFMMU primitives.  These primitives should only be used by sfmmu
31 * routines.
32 */
33
34#if defined(lint)
35#include <sys/types.h>
36#else	/* lint */
37#include "assym.h"
38#endif	/* lint */
39
40#include <sys/asm_linkage.h>
41#include <sys/machtrap.h>
42#include <sys/machasi.h>
43#include <sys/sun4asi.h>
44#include <sys/pte.h>
45#include <sys/mmu.h>
46#include <vm/hat_sfmmu.h>
47#include <vm/seg_spt.h>
48#include <sys/machparam.h>
49#include <sys/privregs.h>
50#include <sys/scb.h>
51#include <sys/intreg.h>
52#include <sys/machthread.h>
53#include <sys/intr.h>
54#include <sys/clock.h>
55#include <sys/trapstat.h>
56
57#ifdef TRAPTRACE
58#include <sys/traptrace.h>
59
60/*
61 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
62 */
63#define	TT_TRACE(label)		\
64	ba	label		;\
65	rd	%pc, %g7
66#else
67
68#define	TT_TRACE(label)
69
70#endif /* TRAPTRACE */
71
72#ifndef	lint
73
74#if (TTE_SUSPEND_SHIFT > 0)
75#define	TTE_SUSPEND_INT_SHIFT(reg)				\
76	sllx	reg, TTE_SUSPEND_SHIFT, reg
77#else
78#define	TTE_SUSPEND_INT_SHIFT(reg)
79#endif
80
81#endif /* lint */
82
83#ifndef	lint
84
85/*
86 * Assumes TSBE_TAG is 0
87 * Assumes TSBE_INTHI is 0
88 * Assumes TSBREG.split is 0
89 */
90
91#if TSBE_TAG != 0
92#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
93#endif
94
95#if TSBTAG_INTHI != 0
96#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
97#endif
98
99/*
100 * The following code assumes the tsb is not split.
101 *
102 * With TSBs no longer shared between processes, it's no longer
103 * necessary to hash the context bits into the tsb index to get
104 * tsb coloring; the new implementation treats the TSB as a
105 * direct-mapped, virtually-addressed cache.
106 *
107 * In:
108 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
109 *    tsbbase = base address of TSB (clobbered)
110 *    tagacc = tag access register (clobbered)
111 *    szc = size code of TSB (ro)
112 *    tmp = scratch reg
113 * Out:
114 *    tsbbase = pointer to entry in TSB
115 */
116#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
117	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
118	srlx	tagacc, vpshift, tagacc 				;\
119	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
120	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
121	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
122	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
123	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
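/*
 * Illustrative use only; this mirrors the call made in sfmmu_get_tsbe()
 * below, and the register assignments are simply that routine's:
 *
 *	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
 *
 * with %o2 = vpshift, %o0 = TSB base (the TSBE pointer is returned in
 * %o0), %o1 = vaddr/tag access (clobbered) and %o3 = TSB size code.
 */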
124
125/*
126 * When the kpm TSB is used it is assumed that it is direct mapped
127 * using (vaddr>>vpshift)%tsbsz as the index.
128 *
129 * Note that, for now, the kpm TSB and kernel TSB are the same for
130 * each mapping size.  However that need not always be the case.  If
131 * the trap handlers are updated to search a different TSB for kpm
132 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
133 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
134 *
135 * In:
136 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
137 *    vaddr = virtual address (clobbered)
138 *    tsbp, szc, tmp = scratch
139 * Out:
140 *    tsbp = pointer to entry in TSB
141 */
142#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
143	cmp	vpshift, MMU_PAGESHIFT					;\
144	bne,pn	%icc, 1f		/* branch if large case */	;\
145	  sethi	%hi(kpmsm_tsbsz), szc					;\
146	sethi	%hi(kpmsm_tsbbase), tsbp				;\
147	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
148	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
149	ba,pt	%icc, 2f						;\
150	  nop								;\
1511:	sethi	%hi(kpm_tsbsz), szc					;\
152	sethi	%hi(kpm_tsbbase), tsbp					;\
153	ld	[szc + %lo(kpm_tsbsz)], szc				;\
154	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
1552:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
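/*
 * Illustrative use only; this mirrors sfmmu_kpm_load_tsb() below, whose
 * register choices are shown here purely as an example:
 *
 *	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
 *
 * with %o2 = vpshift and %g1 = vaddr (clobbered); the kpm TSBE pointer
 * is returned in %g2.
 */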
156
157/*
158 * Lock the TSBE at virtual address tsbep.
159 *
160 * tsbep = TSBE va (ro)
161 * tmp1, tmp2 = scratch registers (clobbered)
162 * label = label to use for branches (text)
163 * %asi = ASI to use for TSB access
164 *
165 * NOTE that we flush the TSB using fast VIS instructions that
166 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
167 * not be treated as a locked entry or we'll get stuck spinning on
168 * an entry that isn't locked but really invalid.
169 */
170
171#if defined(UTSB_PHYS)
172
173#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
174	lda	[tsbep]ASI_MEM, tmp1					;\
175label:									;\
176	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
177	cmp	tmp1, tmp2 						;\
178	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
179	  lda	[tsbep]ASI_MEM, tmp1					;\
180	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
181	cmp	tmp1, tmp2 						;\
182	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
183	  lda	[tsbep]ASI_MEM, tmp1					;\
184	/* tsbe lock acquired */					;\
185	membar #StoreStore
186
187#else /* UTSB_PHYS */
188
189#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
190	lda	[tsbep]%asi, tmp1					;\
191label:									;\
192	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
193	cmp	tmp1, tmp2 						;\
194	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
195	  lda	[tsbep]%asi, tmp1					;\
196	casa	[tsbep]%asi, tmp1, tmp2					;\
197	cmp	tmp1, tmp2 						;\
198	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
199	  lda	[tsbep]%asi, tmp1					;\
200	/* tsbe lock acquired */					;\
201	membar #StoreStore
202
203#endif /* UTSB_PHYS */
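/*
 * Usage note: the label argument must be a local numeric label (e.g. 1),
 * since the macro defines it and spins by branching to its backward
 * reference.  TSB_UPDATE and TSB_UPDATE_TL below pass their label
 * argument straight through, so their callers (e.g. sfmmu_load_tsbe,
 * which passes 1) must follow the same convention.
 */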
204
205/*
206 * Atomically write TSBE at virtual address tsbep.
207 *
208 * tsbep = TSBE va (ro)
209 * tte = TSBE TTE (ro)
210 * tagtarget = TSBE tag (ro)
211 * %asi = ASI to use for TSB access
212 */
213
214#if defined(UTSB_PHYS)
215
216#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
217	add	tsbep, TSBE_TTE, tmp1					;\
218	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
219	membar #StoreStore						;\
220	add	tsbep, TSBE_TAG, tmp1					;\
221	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */
222
223#else /* UTSB_PHYS */
224
225#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
226	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
227	membar #StoreStore						;\
228	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */
229
230#endif /* UTSB_PHYS */
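/*
 * Ordering note: the TTE data is written first and the tag last, with a
 * membar #StoreStore in between, because the tag store is also what
 * releases the lock taken by TSB_LOCK_ENTRY (the lock pattern lives in
 * the tag word).  TSB_UPDATE and TSB_UPDATE_TL below depend on this.
 */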
231
232/*
233 * Load an entry into the TSB at TL > 0.
234 *
235 * tsbep = pointer to the TSBE to load as va (ro)
236 * tte = value of the TTE retrieved and loaded (wo)
237 * tagtarget = tag target register.  To get TSBE tag to load,
238 *   we need to mask off the context and leave only the va (clobbered)
239 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
240 * tmp1, tmp2 = scratch registers
241 * label = label to use for branches (text)
242 * %asi = ASI to use for TSB access
243 */
244
245#if defined(UTSB_PHYS)
246
247#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
248	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
249	/*								;\
250	 * We don't need to update the TSB and then check for a valid tte.	;\
251	 * TSB invalidate will spin till the entry is unlocked.	Note,	;\
252	 * we always invalidate the hash table before we unload the TSB.;\
253	 */								;\
254	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
255	ldxa	[ttepa]ASI_MEM, tte					;\
256	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
257	sethi	%hi(TSBTAG_INVALID), tmp2				;\
258	add	tsbep, TSBE_TAG, tmp1					;\
259	brgez,a,pn tte, label/**/f					;\
260	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
261	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
262label:
263
264#else /* UTSB_PHYS */
265
266#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
267	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
268	/*								;\
269	 * We don't need to update the TSB and then check for a valid tte.	;\
270	 * TSB invalidate will spin till the entry is unlocked.	Note,	;\
271	 * we always invalidate the hash table before we unload the TSB.;\
272	 */								;\
273	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
274	ldxa	[ttepa]ASI_MEM, tte					;\
275	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
276	sethi	%hi(TSBTAG_INVALID), tmp2				;\
277	brgez,a,pn tte, label/**/f					;\
278	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
279	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
280label:
281
282/*
283 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
284 *   for ITLB synthesis.
285 *
286 * tsbep = pointer to the TSBE to load as va (ro)
287 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
288 *   with exec_perm turned off and exec_synth turned on
289 * tagtarget = tag target register.  To get TSBE tag to load,
290 *   we need to mask off the context and leave only the va (clobbered)
291 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
292 * tmp1, tmp2 = scratch registers
293 * label = label to use for branch (text)
294 * %asi = ASI to use for TSB access
295 */
296
297#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
298	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
299	/*								;\
300	 * We don't need to update the TSB and then check for a valid tte.	;\
301	 * TSB invalidate will spin till the entry is unlocked.	Note,	;\
302	 * we always invalidate the hash table before we unload the TSB.;\
303	 * Or in 4M pfn offset to TTE and set the exec_perm bit to 0	;\
304	 * and exec_synth bit to 1.					;\
305	 */								;\
306	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
307	mov	tte, tmp1						;\
308	ldxa	[ttepa]ASI_MEM, tte					;\
309	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
310	sethi	%hi(TSBTAG_INVALID), tmp2				;\
311	brgez,a,pn tte, label/**/f					;\
312	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
313	or	tte, tmp1, tte						;\
314	andn	tte, TTE_EXECPRM_INT, tte				;\
315	or	tte, TTE_E_SYNTH_INT, tte				;\
316	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
317label:
318
319/*
320 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
321 *
322 * tte = value of the TTE, used to get tte_size bits (ro)
323 * tagaccess = tag access register, used to get 4M pfn bits (ro)
324 * pfn = 4M pfn bits shifted to offset for tte (out)
325 * tmp1 = scratch register
326 * label = label to use for branch (text)
327 */
328
329#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
330	/*								;\
331	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
332	 * Return them, shifted, in pfn.				;\
333	 */								;\
334	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
335	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
336	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
337	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
338	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
339	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
340label:									;\
341	sllx	tagaccess, MMU_PAGESHIFT4M, pfn
342
343/*
344 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
345 * for ITLB synthesis.
346 *
347 * tte = value of the TTE, used to get tte_size bits (rw)
348 * tmp1 = scratch register
349 */
350
351#define	SET_TTE4M_PN(tte, tmp)						\
352	/*								;\
353	 * Set 4M pagesize tte bits. 					;\
354	 */								;\
355	set	TTE4M, tmp						;\
356	sllx	tmp, TTE_SZ_SHFT, tmp					;\
357	or	tte, tmp, tte
358
359#endif /* UTSB_PHYS */
360
361/*
362 * Load an entry into the TSB at TL=0.
363 *
364 * tsbep = pointer to the TSBE to load as va (ro)
365 * tteva = pointer to the TTE to load as va (ro)
366 * tagtarget = TSBE tag to load (which contains no context), synthesized
367 * to match va of MMU tag target register only (ro)
368 * tmp1, tmp2 = scratch registers (clobbered)
369 * label = label to use for branches (text)
370 * %asi = ASI to use for TSB access
371 */
372
373#if defined(UTSB_PHYS)
374
375#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
376	/* can't rd tteva after locking tsb because it can tlb miss */	;\
377	ldx	[tteva], tteva			/* load tte */		;\
378	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
379	sethi	%hi(TSBTAG_INVALID), tmp2				;\
380	add	tsbep, TSBE_TAG, tmp1					;\
381	brgez,a,pn tteva, label/**/f					;\
382	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
383	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
384label:
385
386#else /* UTSB_PHYS */
387
388#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
389	/* can't rd tteva after locking tsb because it can tlb miss */	;\
390	ldx	[tteva], tteva			/* load tte */		;\
391	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
392	sethi	%hi(TSBTAG_INVALID), tmp2				;\
393	brgez,a,pn tteva, label/**/f					;\
394	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
395	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
396label:
397
398#endif /* UTSB_PHYS */
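/*
 * Illustrative use only; this is how sfmmu_load_tsbe() below invokes it,
 * with a numeric local label as the last argument:
 *
 *	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, 1)
 *
 * where %o0 = TSBE pointer, %o2 = TTE va, %o1 = tag target and %g1/%g2
 * are scratch.
 */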
399
400/*
401 * Invalidate a TSB entry in the TSB.
402 *
403 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
404 *	 about this earlier to ensure this is true.  Thus when we are
405 *	 directly referencing tsbep below, we are referencing the tte_tag
406 *	 field of the TSBE.  If this  offset ever changes, the code below
407 *	 will need to be modified.
408 *
409 * tsbep = pointer to TSBE as va (ro)
410 * tag = invalidation is done if this matches the TSBE tag (ro)
411 * tmp1 - tmp3 = scratch registers (clobbered)
412 * label = label name to use for branches (text)
413 * %asi = ASI to use for TSB access
414 */
415
416#if defined(UTSB_PHYS)
417
418#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
419	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
420	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
421label/**/1:								;\
422	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
423	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
424	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
425	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
426	cmp	tag, tmp3		/* compare tags */		;\
427	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
428	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
429	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
430	cmp	tmp1, tmp3		/* if not successful */		;\
431	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
432	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
433label/**/2:
434
435#else /* UTSB_PHYS */
436
437#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
438	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
439	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
440label/**/1:								;\
441	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
442	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
443	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
444	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
445	cmp	tag, tmp3		/* compare tags */		;\
446	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
447	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
448	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
449	cmp	tmp1, tmp3		/* if not successful */		;\
450	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
451	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
452label/**/2:
453
454#endif /* UTSB_PHYS */
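/*
 * Illustrative use only (from sfmmu_unload_tsbe() below):
 *
 *	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
 *
 * where %o0 = TSBE pointer and %o1 = tag to match; the entry is left
 * untouched if the tags differ.
 */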
455
456#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
457#error	- TSB_SOFTSZ_MASK too small
458#endif
459
460
461/*
462 * An implementation of setx which will be hot patched at run time.
463 * Since it is being hot patched, there is no value passed in.
464 * Thus, essentially we are implementing
465 *	setx value, tmp, dest
466 * where value is RUNTIME_PATCH (aka 0) in this case.
467 */
468#define	RUNTIME_PATCH_SETX(dest, tmp)					\
469	sethi	%hh(RUNTIME_PATCH), tmp					;\
470	sethi	%lm(RUNTIME_PATCH), dest				;\
471	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
472	or	dest, %lo(RUNTIME_PATCH), dest				;\
473	sllx	tmp, 32, tmp						;\
474	nop				/* for perf reasons */		;\
475	or	tmp, dest, dest		/* contents of patched value */
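/*
 * For reference, sfmmu_fixup_setx() below patches the value into the
 * instructions above in the same order they appear:
 *	sethi	%hh	<- bits [63:42]
 *	sethi	%lm	<- bits [31:10]
 *	or	%hm	<- bits [41:32]
 *	or	%lo	<- bits [9:0]
 */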
476
477
478#endif /* lint */
479
480
481#if defined (lint)
482
483/*
484 * sfmmu related subroutines
485 */
486
487/*
488 * Use cas; if the tte has changed underneath us, reread and try again.
489 * In the case of a retry, it will update sttep with the new original.
490 */
491/* ARGSUSED */
492int
493sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
494{ return(0); }
495
496/*
497 * Use cas; if the tte has changed underneath us, return 1, else return 0
498 */
499/* ARGSUSED */
500int
501sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
502{ return(0); }
503
504/* ARGSUSED */
505void
506sfmmu_copytte(tte_t *sttep, tte_t *dttep)
507{}
508
509/*ARGSUSED*/
510struct tsbe *
511sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
512{ return(0); }
513
514/*ARGSUSED*/
515uint64_t
516sfmmu_make_tsbtag(caddr_t va)
517{ return(0); }
518
519#else	/* lint */
520
521	.seg	".data"
522	.global	sfmmu_panic1
523sfmmu_panic1:
524	.asciz	"sfmmu_asm: interrupts already disabled"
525
526	.global	sfmmu_panic3
527sfmmu_panic3:
528	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"
529
530	.global	sfmmu_panic4
531sfmmu_panic4:
532	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"
533
534	.global	sfmmu_panic5
535sfmmu_panic5:
536	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"
537
538
539	ENTRY_NP(sfmmu_modifytte)
540	ldx	[%o2], %g3			/* current */
541	ldx	[%o0], %g1			/* original */
5422:
543	ldx	[%o1], %g2			/* modified */
544	cmp	%g2, %g3			/* is modified = current? */
545	be,a,pt	%xcc,1f				/* yes, don't write */
546	stx	%g3, [%o0]			/* update new original */
547	casx	[%o2], %g1, %g2
548	cmp	%g1, %g2
549	be,pt	%xcc, 1f			/* cas succeeded - return */
550	  nop
551	ldx	[%o2], %g3			/* new current */
552	stx	%g3, [%o0]			/* save as new original */
553	ba,pt	%xcc, 2b
554	  mov	%g3, %g1
5551:	retl
556	membar	#StoreLoad
557	SET_SIZE(sfmmu_modifytte)
558
559	ENTRY_NP(sfmmu_modifytte_try)
560	ldx	[%o1], %g2			/* modified */
561	ldx	[%o2], %g3			/* current */
562	ldx	[%o0], %g1			/* original */
563	cmp	%g3, %g2			/* is modified = current? */
564	be,a,pn %xcc,1f				/* yes, don't write */
565	mov	0, %o1				/* as if cas failed. */
566
567	casx	[%o2], %g1, %g2
568	membar	#StoreLoad
569	cmp	%g1, %g2
570	movne	%xcc, -1, %o1			/* cas failed. */
571	move	%xcc, 1, %o1			/* cas succeeded. */
5721:
573	stx	%g2, [%o0]			/* report "current" value */
574	retl
575	mov	%o1, %o0
576	SET_SIZE(sfmmu_modifytte_try)
577
578	ENTRY_NP(sfmmu_copytte)
579	ldx	[%o0], %g1
580	retl
581	stx	%g1, [%o1]
582	SET_SIZE(sfmmu_copytte)
583
584
585	/*
586	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
587	 * %o0 = TSB base address (in), pointer to TSB entry (out)
588	 * %o1 = vaddr (in)
589	 * %o2 = vpshift (in)
590	 * %o3 = tsb size code (in)
591	 * %o4 = scratch register
592	 */
593	ENTRY_NP(sfmmu_get_tsbe)
594	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
595	retl
596	nop
597	SET_SIZE(sfmmu_get_tsbe)
598
599	/*
600	 * Return a TSB tag for the given va.
601	 * %o0 = va (in/clobbered)
602	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
603	 */
604	ENTRY_NP(sfmmu_make_tsbtag)
605	retl
606	srln	%o0, TTARGET_VA_SHIFT, %o0
607	SET_SIZE(sfmmu_make_tsbtag)
608
609#endif /* lint */
610
611/*
612 * Other sfmmu primitives
613 */
614
615
616#if defined (lint)
617void
618sfmmu_patch_ktsb(void)
619{
620}
621
622void
623sfmmu_kpm_patch_tlbm(void)
624{
625}
626
627void
628sfmmu_kpm_patch_tsbm(void)
629{
630}
631
632/* ARGSUSED */
633void
634sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
635{
636}
637
638/* ARGSUSED */
639void
640sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
641{
642}
643
644/* ARGSUSED */
645void
646sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
647{
648}
649
650/* ARGSUSED */
651void
652sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
653{
654}
655
656#else /* lint */
657
658#define	I_SIZE		4
659
660	ENTRY_NP(sfmmu_fix_ktlb_traptable)
661	/*
662	 * %o0 = start of patch area
663	 * %o1 = size code of TSB to patch
664	 * %o3 = scratch
665	 */
666	/* fix sll */
667	ld	[%o0], %o3			/* get sll */
668	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
669	st	%o3, [%o0]			/* write sll */
670	flush	%o0
671	/* fix srl */
672	add	%o0, I_SIZE, %o0		/* goto next instr. */
673	ld	[%o0], %o3			/* get srl */
674	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
675	st	%o3, [%o0]			/* write srl */
676	retl
677	flush	%o0
678	SET_SIZE(sfmmu_fix_ktlb_traptable)
679
680	ENTRY_NP(sfmmu_fixup_ktsbbase)
681	/*
682	 * %o0 = start of patch area
683	 * %o5 = kernel virtual or physical tsb base address
684	 * %o2, %o3 are used as scratch registers.
685	 */
686	/* fixup sethi instruction */
687	ld	[%o0], %o3
688	srl	%o5, 10, %o2			! offset is bits 32:10
689	or	%o3, %o2, %o3			! set imm22
690	st	%o3, [%o0]
691	/* fixup offset of lduw/ldx */
692	add	%o0, I_SIZE, %o0		! next instr
693	ld	[%o0], %o3
694	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
695	or	%o3, %o2, %o3
696	st	%o3, [%o0]
697	retl
698	flush	%o0
699	SET_SIZE(sfmmu_fixup_ktsbbase)
700
701	ENTRY_NP(sfmmu_fixup_setx)
702	/*
703	 * %o0 = start of patch area
704	 * %o4 = 64 bit value to patch
705	 * %o2, %o3 are used as scratch registers.
706	 *
707	 * Note: Assuming that all parts of the instructions which need to be
708	 *	 patched correspond to RUNTIME_PATCH (aka 0)
709	 *
710	 * Note the implementation of setx which is being patched is as follows:
711	 *
712	 * sethi   %hh(RUNTIME_PATCH), tmp
713	 * sethi   %lm(RUNTIME_PATCH), dest
714	 * or      tmp, %hm(RUNTIME_PATCH), tmp
715	 * or      dest, %lo(RUNTIME_PATCH), dest
716	 * sllx    tmp, 32, tmp
717	 * nop
718	 * or      tmp, dest, dest
719	 *
720	 * which differs from the implementation in the
721	 * "SPARC Architecture Manual"
722	 */
723	/* fixup sethi instruction */
724	ld	[%o0], %o3
725	srlx	%o4, 42, %o2			! bits [63:42]
726	or	%o3, %o2, %o3			! set imm22
727	st	%o3, [%o0]
728	/* fixup sethi instruction */
729	add	%o0, I_SIZE, %o0		! next instr
730	ld	[%o0], %o3
731	sllx	%o4, 32, %o2			! clear upper bits
732	srlx	%o2, 42, %o2			! bits [31:10]
733	or	%o3, %o2, %o3			! set imm22
734	st	%o3, [%o0]
735	/* fixup or instruction */
736	add	%o0, I_SIZE, %o0		! next instr
737	ld	[%o0], %o3
738	srlx	%o4, 32, %o2			! bits [63:32]
739	and	%o2, 0x3ff, %o2			! bits [41:32]
740	or	%o3, %o2, %o3			! set imm
741	st	%o3, [%o0]
742	/* fixup or instruction */
743	add	%o0, I_SIZE, %o0		! next instr
744	ld	[%o0], %o3
745	and	%o4, 0x3ff, %o2			! bits [9:0]
746	or	%o3, %o2, %o3			! set imm
747	st	%o3, [%o0]
748	retl
749	flush	%o0
750	SET_SIZE(sfmmu_fixup_setx)
751
752	ENTRY_NP(sfmmu_fixup_or)
753	/*
754	 * %o0 = start of patch area
755	 * %o4 = 32 bit value to patch
756	 * %o2, %o3 are used as scratch registers.
757	 * Note: Assuming that all parts of the instructions which need to be
758	 *	 patched correspond to RUNTIME_PATCH (aka 0)
759	 */
760	ld	[%o0], %o3
761	and	%o4, 0x3ff, %o2			! bits [9:0]
762	or	%o3, %o2, %o3			! set imm
763	st	%o3, [%o0]
764	retl
765	flush	%o0
766	SET_SIZE(sfmmu_fixup_or)
767
768	ENTRY_NP(sfmmu_fixup_shiftx)
769	/*
770	 * %o0 = start of patch area
771	 * %o4 = signed int immediate value to add to sllx/srlx imm field
772	 * %o2, %o3 are used as scratch registers.
773	 *
774	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
775	 * so we do a simple add.  The caller must be careful to prevent
776	 * overflow, which could easily occur if the initial value is nonzero!
777	 */
778	ld	[%o0], %o3			! %o3 = instruction to patch
779	and	%o3, 0x3f, %o2			! %o2 = existing imm value
780	add	%o2, %o4, %o2			! %o2 = new imm value
781	andn	%o3, 0x3f, %o3			! clear old imm value
782	and	%o2, 0x3f, %o2			! truncate new imm value
783	or	%o3, %o2, %o3			! set new imm value
784	st	%o3, [%o0]			! store updated instruction
785	retl
786	flush	%o0
787	SET_SIZE(sfmmu_fixup_shiftx)
788
789	ENTRY_NP(sfmmu_fixup_mmu_asi)
790	/*
791	 * Patch imm_asi of all ldda instructions in the MMU
792	 * trap handlers.  We search MMU_PATCH_INSTR instructions
793	 * starting from the itlb miss handler (trap 0x64).
794	 * %o0 = address of tt[0,1]_itlbmiss
795	 * %o1 = imm_asi to setup, shifted by appropriate offset.
796	 * %o3 = number of instructions to search
797	 * %o4 = reserved by caller: called from leaf routine
798	 */
7991:	ldsw	[%o0], %o2			! load instruction to %o2
800	brgez,pt %o2, 2f
801	  srl	%o2, 30, %o5
802	btst	1, %o5				! test bit 30; skip if not set
803	bz,pt	%icc, 2f
804	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
805	srlx	%o5, 58, %o5			! isolate op3 part of opcode
806	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
807	brnz,pt	%o5, 2f				! skip if not a match
808	  or	%o2, %o1, %o2			! or in imm_asi
809	st	%o2, [%o0]			! write patched instruction
8102:	dec	%o3
811	brnz,a,pt %o3, 1b			! loop until we're done
812	  add	%o0, I_SIZE, %o0
813	retl
814	flush	%o0
815	SET_SIZE(sfmmu_fixup_mmu_asi)
816
817	/*
818	 * Patch immediate ASI used to access the TSB in the
819	 * trap table.
820	 * inputs: %o0 = value of ktsb_phys
821	 */
822	ENTRY_NP(sfmmu_patch_mmu_asi)
823	mov	%o7, %o4			! save return pc in %o4
824	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
825	movrz	%o0, ASI_NQUAD_LD, %o3
826	sll	%o3, 5, %o1			! imm_asi offset
827	mov	6, %o3				! number of instructions
828	sethi	%hi(dktsb), %o0			! to search
829	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
830	  or	%o0, %lo(dktsb), %o0
831	mov	6, %o3				! number of instructions
832	sethi	%hi(dktsb4m), %o0		! to search
833	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
834	  or	%o0, %lo(dktsb4m), %o0
835	mov	6, %o3				! number of instructions
836	sethi	%hi(iktsb), %o0			! to search
837	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
838	  or	%o0, %lo(iktsb), %o0
839	mov	%o4, %o7			! restore return pc -- leaf
840	retl
841	nop
842	SET_SIZE(sfmmu_patch_mmu_asi)
843
844	ENTRY_NP(sfmmu_patch_ktsb)
845	/*
846	 * We need to fix iktsb, dktsb, et al.
847	 */
848	save	%sp, -SA(MINFRAME), %sp
849	set	ktsb_phys, %o1
850	ld	[%o1], %o4
851	set	ktsb_base, %o5
852	set	ktsb4m_base, %l1
853	brz,pt	%o4, 1f
854	  nop
855	set	ktsb_pbase, %o5
856	set	ktsb4m_pbase, %l1
8571:
858	sethi	%hi(ktsb_szcode), %o1
859	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */
860
861	sethi	%hi(iktsb), %o0
862	call	sfmmu_fix_ktlb_traptable
863	  or	%o0, %lo(iktsb), %o0
864
865	sethi	%hi(dktsb), %o0
866	call	sfmmu_fix_ktlb_traptable
867	  or	%o0, %lo(dktsb), %o0
868
869	sethi	%hi(ktsb4m_szcode), %o1
870	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */
871
872	sethi	%hi(dktsb4m), %o0
873	call	sfmmu_fix_ktlb_traptable
874	  or	%o0, %lo(dktsb4m), %o0
875
876#ifndef sun4v
877	mov	ASI_N, %o2
878	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
879	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
880	sethi	%hi(tsb_kernel_patch_asi), %o0
881	call	sfmmu_fixup_or
882	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
883#endif
884
885	ldx 	[%o5], %o4		! load ktsb base addr (VA or PA)
886
887	sethi	%hi(dktsbbase), %o0
888	call	sfmmu_fixup_setx	! patch value of ktsb base addr
889	  or	%o0, %lo(dktsbbase), %o0
890
891	sethi	%hi(iktsbbase), %o0
892	call	sfmmu_fixup_setx	! patch value of ktsb base addr
893	  or	%o0, %lo(iktsbbase), %o0
894
895	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
896	call	sfmmu_fixup_setx	! patch value of ktsb base addr
897	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0
898
899#ifdef sun4v
900	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
901	call	sfmmu_fixup_setx	! patch value of ktsb base addr
902	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
903#endif /* sun4v */
904
905	ldx 	[%l1], %o4		! load ktsb4m base addr (VA or PA)
906
907	sethi	%hi(dktsb4mbase), %o0
908	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
909	  or	%o0, %lo(dktsb4mbase), %o0
910
911	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
912	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
913	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0
914
915#ifdef sun4v
916	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
917	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
918	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
919#endif /* sun4v */
920
921	set	ktsb_szcode, %o4
922	ld	[%o4], %o4
923	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
924	call	sfmmu_fixup_or		! patch value of ktsb_szcode
925	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0
926
927#ifdef sun4v
928	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
929	call	sfmmu_fixup_or		! patch value of ktsb_szcode
930	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
931#endif /* sun4v */
932
933	set	ktsb4m_szcode, %o4
934	ld	[%o4], %o4
935	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
936	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
937	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0
938
939#ifdef sun4v
940	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
941	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
942	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
943#endif /* sun4v */
944
945	ret
946	restore
947	SET_SIZE(sfmmu_patch_ktsb)
948
949	ENTRY_NP(sfmmu_kpm_patch_tlbm)
950	/*
951	 * Fix up trap handlers in the common segkpm case.  This is reserved
952	 * for future use should kpm TSB be changed to be other than the
953	 * kernel TSB.
954	 */
955	retl
956	nop
957	SET_SIZE(sfmmu_kpm_patch_tlbm)
958
959	ENTRY_NP(sfmmu_kpm_patch_tsbm)
960	/*
961	 * nop the branch to sfmmu_kpm_dtsb_miss_small
962	 * in the case where we are using large pages for
963	 * seg_kpm (and hence must probe the second TSB for
964	 * seg_kpm VAs)
965	 */
966	set	dktsb4m_kpmcheck_small, %o0
967	MAKE_NOP_INSTR(%o1)
968	st	%o1, [%o0]
969	flush	%o0
970	retl
971	nop
972	SET_SIZE(sfmmu_kpm_patch_tsbm)
973
974	ENTRY_NP(sfmmu_patch_utsb)
975#ifdef sun4v
976	retl
977	nop
978#else /* sun4v */
979	/*
980	 * We need to hot patch utsb_vabase and utsb4m_vabase
981	 */
982	save	%sp, -SA(MINFRAME), %sp
983
984	/* patch value of utsb_vabase */
985	set	utsb_vabase, %o1
986	ldx	[%o1], %o4
987	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
988	call	sfmmu_fixup_setx
989	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
990	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
991	call	sfmmu_fixup_setx
992	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
993	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
994	call	sfmmu_fixup_setx
995	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
996
997	/* patch value of utsb4m_vabase */
998	set	utsb4m_vabase, %o1
999	ldx	[%o1], %o4
1000	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
1001	call	sfmmu_fixup_setx
1002	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
1003	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
1004	call	sfmmu_fixup_setx
1005	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
1006	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
1007	call	sfmmu_fixup_setx
1008	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0
1009
1010	/*
1011	 * Patch TSB base register masks and shifts if needed.
1012	 * By default the TSB base register contents are set up for 4M slab.
1013	 * If we're using a smaller slab size and reserved VA range we need
1014	 * to patch up those values here.
1015	 */
1016	set	tsb_slab_shift, %o1
1017	set	MMU_PAGESHIFT4M, %o4
1018	ldsw	[%o1], %o3
1019	subcc	%o4, %o3, %o4
1020	bz,pt	%icc, 1f
1021	  /* delay slot safe */
1022
1023	/* patch reserved VA range size if needed. */
1024	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
1025	call	sfmmu_fixup_shiftx
1026	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
1027	call	sfmmu_fixup_shiftx
1028	  add	%o0, I_SIZE, %o0
1029	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
1030	call	sfmmu_fixup_shiftx
1031	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
1032	call	sfmmu_fixup_shiftx
1033	  add	%o0, I_SIZE, %o0
10341:
1035	/* patch TSBREG_VAMASK used to set up TSB base register */
1036	set	tsb_slab_mask, %o1
1037	lduw	[%o1], %o4
1038	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
1039	call	sfmmu_fixup_or
1040	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
1041	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
1042	call	sfmmu_fixup_or
1043	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0
1044
1045	ret
1046	restore
1047#endif /* sun4v */
1048	SET_SIZE(sfmmu_patch_utsb)
1049
1050
1051	/*
1052	 * Routine that loads an entry into a tsb using virtual addresses.
1053	 * Locking is required since all cpus can use the same TSB.
1054	 * Note that it is no longer required to have a valid context
1055	 * when calling this function.
1056	 */
1057	ENTRY_NP(sfmmu_load_tsbe)
1058	/*
1059	 * %o0 = pointer to tsbe to load
1060	 * %o1 = tsb tag
1061	 * %o2 = virtual pointer to TTE
1062	 * %o3 = 1 if physical address in %o0 else 0
1063	 */
1064	rdpr	%pstate, %o5
1065#ifdef DEBUG
1066	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
1067	bnz,pt 	%icc, 1f			/* disabled, panic	 */
1068	  nop
1069
1070	sethi	%hi(panicstr), %g1
1071	ldx	[%g1 + %lo(panicstr)], %g1
1072	tst	%g1
1073	bnz,pt	%icc, 1f
1074	  nop
1075
1076	save	%sp, -SA(MINFRAME), %sp
1077	sethi	%hi(sfmmu_panic1), %o0
1078	call	panic
1079	 or	%o0, %lo(sfmmu_panic1), %o0
10801:
1081#endif /* DEBUG */
1082
1083	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
1084
1085	SETUP_TSB_ASI(%o3, %g3)
1086	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, 1)
1087
1088	wrpr	%g0, %o5, %pstate		/* enable interrupts */
1089
1090	retl
1091	membar	#StoreStore|#StoreLoad
1092	SET_SIZE(sfmmu_load_tsbe)
1093
1094	/*
1095	 * Flush TSB of a given entry if the tag matches.
1096	 */
1097	ENTRY(sfmmu_unload_tsbe)
1098	/*
1099	 * %o0 = pointer to tsbe to be flushed
1100	 * %o1 = tag to match
1101	 * %o2 = 1 if physical address in %o0 else 0
1102	 */
1103	SETUP_TSB_ASI(%o2, %g1)
1104	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
1105	retl
1106	membar	#StoreStore|#StoreLoad
1107	SET_SIZE(sfmmu_unload_tsbe)
1108
1109	/*
1110	 * Routine that loads a TTE into the kpm TSB from C code.
1111	 * Locking is required since kpm TSB is shared among all CPUs.
1112	 */
1113	ENTRY_NP(sfmmu_kpm_load_tsb)
1114	/*
1115	 * %o0 = vaddr
1116	 * %o1 = ttep
1117	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
1118	 */
1119	rdpr	%pstate, %o5			! %o5 = saved pstate
1120#ifdef DEBUG
1121	andcc	%o5, PSTATE_IE, %g0		! if interrupts already
1122	bnz,pt	%icc, 1f			! disabled, panic
1123	  nop
1124
1125	sethi	%hi(panicstr), %g1
1126	ldx	[%g1 + %lo(panicstr)], %g1
1127	tst	%g1
1128	bnz,pt	%icc, 1f
1129	  nop
1130
1131	save	%sp, -SA(MINFRAME), %sp
1132	sethi	%hi(sfmmu_panic1), %o0
1133	call	panic
1134	  or	%o0, %lo(sfmmu_panic1), %o0
11351:
1136#endif /* DEBUG */
1137	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts
1138
1139#ifndef sun4v
1140	sethi	%hi(ktsb_phys), %o4
1141	mov	ASI_N, %o3
1142	ld	[%o4 + %lo(ktsb_phys)], %o4
1143	movrnz	%o4, ASI_MEM, %o3
1144	mov	%o3, %asi
1145#endif
1146	mov	%o0, %g1			! %g1 = vaddr
1147
1148	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1149	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
1150	/* %g2 = tsbep, %g1 clobbered */
1151
1152	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
1153	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
1154	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, 1)
1155
1156	wrpr	%g0, %o5, %pstate		! enable interrupts
1157	retl
1158	  membar #StoreStore|#StoreLoad
1159	SET_SIZE(sfmmu_kpm_load_tsb)
1160
1161	/*
1162	 * Routine that shoots down a TTE in the kpm TSB or in the
1163	 * kernel TSB depending on virtpg. Locking is required since
1164	 * kpm/kernel TSB is shared among all CPUs.
1165	 */
1166	ENTRY_NP(sfmmu_kpm_unload_tsb)
1167	/*
1168	 * %o0 = vaddr
1169	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
1170	 */
1171#ifndef sun4v
1172	sethi	%hi(ktsb_phys), %o4
1173	mov	ASI_N, %o3
1174	ld	[%o4 + %lo(ktsb_phys)], %o4
1175	movrnz	%o4, ASI_MEM, %o3
1176	mov	%o3, %asi
1177#endif
1178	mov	%o0, %g1			! %g1 = vaddr
1179
1180	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1181	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
1182	/* %g2 = tsbep, %g1 clobbered */
1183
1184	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
1185	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
1186	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)
1187
1188	retl
1189	  membar	#StoreStore|#StoreLoad
1190	SET_SIZE(sfmmu_kpm_unload_tsb)
1191
1192#endif /* lint */
1193
1194
1195#if defined (lint)
1196
1197/*ARGSUSED*/
1198pfn_t
1199sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
1200{ return(0); }
1201
1202#else /* lint */
1203
1204	ENTRY_NP(sfmmu_ttetopfn)
1205	ldx	[%o0], %g1			/* read tte */
1206	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
1207	/*
1208	 * g1 = pfn
1209	 */
1210	retl
1211	mov	%g1, %o0
1212	SET_SIZE(sfmmu_ttetopfn)
1213
1214#endif /* !lint */
1215
1216
1217#if defined (lint)
1218/*
1219 * The sfmmu_hblk_hash_add is the assembly primitive for adding hmeblks to the
1220 * the hash list.
1221 */
1222/* ARGSUSED */
1223void
1224sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
1225	uint64_t hblkpa)
1226{
1227}
1228
1229/*
1230 * The sfmmu_hblk_hash_rm is the assembly primitive to remove hmeblks from the
1231 * hash list.
1232 */
1233/* ARGSUSED */
1234void
1235sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
1236	uint64_t hblkpa, struct hme_blk *prev_hblkp)
1237{
1238}
1239#else /* lint */
1240
1241/*
1242 * Functions to grab/release hme bucket list lock.  I only use a byte
1243 * instead of the whole int because eventually we might want to
1244 * put some counters on the other bytes (of course, these routines would
1245 * have to change).  The code that grabs this lock should execute
1246 * with interrupts disabled and hold the lock for the least amount of time
1247 * possible.
1248 */
1249
1250/*
1251 * Even though hmeh_listlock is updated using pa there's no need to flush
1252 * dcache since hmeh_listlock will be restored to the original value (0)
1253 * before interrupts are reenabled.
1254 */
1255
1256/*
1257 * For sparcv9 hme hash buckets may not be in the nucleus.  hme hash update
1258 * routines still use virtual addresses to update the bucket fields. But they
1259 * must not cause a TLB miss after grabbing the low level bucket lock. To
1260 * achieve this we must make sure the bucket structure is completely within an
1261 * 8K page.
1262 */
1263
1264#if (HMEBUCK_SIZE & (HMEBUCK_SIZE - 1))
1265#error - the size of hmehash_bucket structure is not a power of 2
1266#endif
1267
1268#define HMELOCK_ENTER(hmebp, tmp1, tmp2, label1, asi)           \
1269	mov     0xff, tmp2                                      ;\
1270	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
1271label1:                                                         ;\
1272	casa    [tmp1]asi, %g0, tmp2                            ;\
1273	brnz,pn tmp2, label1                                    ;\
1274	mov     0xff, tmp2                                      ;\
1275	membar  #LoadLoad
1276
1277#define HMELOCK_EXIT(hmebp, tmp1, asi)                          \
1278	membar  #LoadStore|#StoreStore                          ;\
1279	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
1280	sta     %g0, [tmp1]asi
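/*
 * Illustrative use only; sfmmu_hblk_hash_add() and sfmmu_hblk_hash_rm()
 * below use the macros this way, with interrupts disabled and ASI_N
 * because the buckets are accessed via their nucleus mappings:
 *
 *	HMELOCK_ENTER(%o0, %o2, %o3, hashadd1, ASI_N)
 *	... critical section ...
 *	HMELOCK_EXIT(%o0, %g2, ASI_N)
 */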
1281
1282	.seg	".data"
1283hblk_add_panic1:
1284	.ascii	"sfmmu_hblk_hash_add: interrupts disabled"
1285	.byte	0
1286hblk_add_panic2:
1287	.ascii	"sfmmu_hblk_hash_add: va hmeblkp is NULL but pa is not"
1288	.byte	0
1289	.align	4
1290	.seg	".text"
1291
1292	ENTRY_NP(sfmmu_hblk_hash_add)
1293	/*
1294	 * %o0 = hmebp
1295	 * %o1 = hmeblkp
1296	 * %o2 = hblkpa
1297	 */
1298	rdpr	%pstate, %o5
1299#ifdef DEBUG
1300	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
1301	bnz,pt %icc, 3f				/* disabled, panic	 */
1302	  nop
1303	save	%sp, -SA(MINFRAME), %sp
1304	sethi	%hi(hblk_add_panic1), %o0
1305	call	panic
1306	 or	%o0, %lo(hblk_add_panic1), %o0
1307	ret
1308	restore
1309
13103:
1311#endif /* DEBUG */
1312	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
1313	mov	%o2, %g1
1314
1315	/*
1316	 * g1 = hblkpa
1317	 */
1318	ldn	[%o0 + HMEBUCK_HBLK], %o4	/* next hmeblk */
1319	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = next hblkpa */
1320#ifdef	DEBUG
1321	cmp	%o4, %g0
1322	bne,pt %xcc, 1f
1323	 nop
1324	brz,pt %g2, 1f
1325	 nop
1326	wrpr	%g0, %o5, %pstate		/* enable interrupts */
1327	save	%sp, -SA(MINFRAME), %sp
1328	sethi	%hi(hblk_add_panic2), %o0
1329	call	panic
1330	  or	%o0, %lo(hblk_add_panic2), %o0
1331	ret
1332	restore
13331:
1334#endif /* DEBUG */
1335	/*
1336	 * We update hmeblks entries before grabbing lock because the stores
1337	 * could take a tlb miss and require the hash lock.  The buckets
1338	 * are part of the nucleus so we are cool with those stores.
1339	 *
1340	 * if buckets are not part of the nucleus our game is to
1341	 * not touch any other page via va until we drop the lock.
1342	 * This guarantees we won't get a tlb miss before the lock release
1343	 * since interrupts are disabled.
1344	 */
1345	stn	%o4, [%o1 + HMEBLK_NEXT]	/* update hmeblk's next */
1346	stx	%g2, [%o1 + HMEBLK_NEXTPA]	/* update hmeblk's next pa */
1347	HMELOCK_ENTER(%o0, %o2, %o3, hashadd1, ASI_N)
1348	stn	%o1, [%o0 + HMEBUCK_HBLK]	/* update bucket hblk next */
1349	stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* add hmeblk to list */
1350	HMELOCK_EXIT(%o0, %g2, ASI_N)
1351	retl
1352	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1353	SET_SIZE(sfmmu_hblk_hash_add)
1354
1355	ENTRY_NP(sfmmu_hblk_hash_rm)
1356	/*
1357	 * This function removes an hmeblk from the hash chain.
1358	 * It is written to guarantee we don't take a tlb miss
1359	 * by using physical addresses to update the list.
1360	 *
1361	 * %o0 = hmebp
1362	 * %o1 = hmeblkp
1363	 * %o2 = hmeblkp previous pa
1364	 * %o3 = hmeblkp previous
1365	 */
1366
1367	mov	%o3, %o4			/* o4 = hmeblkp previous */
1368
1369	rdpr	%pstate, %o5
1370#ifdef DEBUG
1371	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
1372	bnz,pt 	%icc, 3f			/* disabled, panic	 */
1373	  nop
1374
1375	sethi	%hi(panicstr), %g1
1376	ldx	[%g1 + %lo(panicstr)], %g1
1377	tst	%g1
1378	bnz,pt	%icc, 3f
1379	  nop
1380
1381	sethi	%hi(sfmmu_panic1), %o0
1382	call	panic
1383	 or	%o0, %lo(sfmmu_panic1), %o0
13843:
1385#endif /* DEBUG */
1386	/*
1387	 * disable interrupts, clear Address Mask to access 64 bit physaddr
1388	 */
1389	andn    %o5, PSTATE_IE, %g1
1390	wrpr    %g1, 0, %pstate
1391
1392#ifndef sun4v
1393	sethi   %hi(dcache_line_mask), %g4
1394	ld      [%g4 + %lo(dcache_line_mask)], %g4
1395#endif /* sun4v */
1396
1397	/*
1398	 * if buckets are not part of the nucleus our game is to
1399	 * not touch any other page via va until we drop the lock.
1400	 * This guarantees we won't get a tlb miss before the lock release
1401	 * since interrupts are disabled.
1402	 */
1403	HMELOCK_ENTER(%o0, %g1, %g3, hashrm1, ASI_N)
1404	ldn	[%o0 + HMEBUCK_HBLK], %g2	/* first hmeblk in list */
1405	cmp	%g2, %o1
1406	bne,pt	%ncc,1f
1407	 mov	ASI_MEM, %asi
1408	/*
1409	 * hmeblk is first on list
1410	 */
1411	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = hmeblk pa */
1412	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
1413	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
1414	stn	%o3, [%o0 + HMEBUCK_HBLK]	/* write va */
1415	ba,pt	%xcc, 2f
1416	  stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* write pa */
14171:
1418	/* hmeblk is not first on list */
1419
1420	mov	%o2, %g3
1421#ifndef sun4v
1422	GET_CPU_IMPL(%g2)
1423	cmp %g2, CHEETAH_IMPL
1424	bge %icc, hblk_hash_rm_1
1425	and	%o4, %g4, %g2
1426	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev pa from dcache */
1427	add	%o4, HMEBLK_NEXT, %o4
1428	and	%o4, %g4, %g2
1429	ba	hblk_hash_rm_2
1430	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev va from dcache */
1431hblk_hash_rm_1:
1432
1433	stxa	%g0, [%g3]ASI_DC_INVAL		/* flush prev pa from dcache */
1434	membar	#Sync
1435	add     %g3, HMEBLK_NEXT, %g2
1436	stxa	%g0, [%g2]ASI_DC_INVAL		/* flush prev va from dcache */
1437hblk_hash_rm_2:
1438	membar	#Sync
1439#endif /* sun4v */
1440	ldxa	[%g3 + HMEBLK_NEXTPA] %asi, %g2	/* g2 = hmeblk pa */
1441	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
1442	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
1443	stna	%o3, [%g3 + HMEBLK_NEXT] %asi	/* write va */
1444	stxa	%g1, [%g3 + HMEBLK_NEXTPA] %asi	/* write pa */
14452:
1446	HMELOCK_EXIT(%o0, %g2, ASI_N)
1447	retl
1448	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1449	SET_SIZE(sfmmu_hblk_hash_rm)
1450
1451#endif /* lint */
1452
1453/*
1454 * These macros are used to update global sfmmu hme hash statistics
1455 * in perf critical paths. They are only enabled in debug kernels or
1456 * if SFMMU_STAT_GATHER is defined.
1457 */
1458#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1459#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1460	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1461	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
1462	cmp	tmp1, hatid						;\
1463	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
1464	set	sfmmu_global_stat, tmp1					;\
1465	add	tmp1, tmp2, tmp1					;\
1466	ld	[tmp1], tmp2						;\
1467	inc	tmp2							;\
1468	st	tmp2, [tmp1]
1469
1470#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1471	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1472	mov	HATSTAT_KHASH_LINKS, tmp2				;\
1473	cmp	tmp1, hatid						;\
1474	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
1475	set	sfmmu_global_stat, tmp1					;\
1476	add	tmp1, tmp2, tmp1					;\
1477	ld	[tmp1], tmp2						;\
1478	inc	tmp2							;\
1479	st	tmp2, [tmp1]
1480
1481
1482#else /* DEBUG || SFMMU_STAT_GATHER */
1483
1484#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1485
1486#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1487
1488#endif  /* DEBUG || SFMMU_STAT_GATHER */
1489
1490/*
1491 * This macro is used to update global sfmmu kstats in non
1492 * perf critical areas, so it is enabled all the time.
1493 */
1494#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
1495	sethi	%hi(sfmmu_global_stat), tmp1				;\
1496	add	tmp1, statname, tmp1					;\
1497	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
1498	inc	tmp2							;\
1499	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]
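/*
 * Illustrative use only (from sfmmu_pagefault below):
 *
 *	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
 *
 * which increments the pagefault counter in sfmmu_global_stat
 * (HATSTAT_PAGEFAULT is the assym-generated offset), using %g6/%g4
 * as scratch.
 */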
1500
1501/*
1502 * These macros are used to update per cpu stats in non perf
1503 * critical areas so they are enabled all the time
1504 */
1505#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
1506	ld	[tsbarea + stat], tmp1					;\
1507	inc	tmp1							;\
1508	st	tmp1, [tsbarea + stat]
1509
1510/*
1511 * These macros are used to update per cpu stats in non perf
1512 * critical areas so they are enabled all the time
1513 */
1514#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
1515	lduh	[tsbarea + stat], tmp1					;\
1516	inc	tmp1							;\
1517	stuh	tmp1, [tsbarea + stat]
1518
1519#if defined(KPM_TLBMISS_STATS_GATHER)
1520	/*
1521	 * Count kpm dtlb misses separately to allow a different
1522	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
1523	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
1524	 */
1525#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
1526	brgez	tagacc, label	/* KPM VA? */				;\
1527	nop								;\
1528	CPU_INDEX(tmp1, tsbma)						;\
1529	sethi	%hi(kpmtsbm_area), tsbma				;\
1530	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
1531	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
1532	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
1533	/* VA range check */						;\
1534	ldx	[tsbma + KPMTSBM_VBASE], val				;\
1535	cmp	tagacc, val						;\
1536	blu,pn	%xcc, label						;\
1537	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
1538	cmp	tagacc, tmp1						;\
1539	bgeu,pn	%xcc, label						;\
1540	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
1541	inc	val							;\
1542	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
1543label:
1544#else
1545#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1546#endif	/* KPM_TLBMISS_STATS_GATHER */
1547
1548#if defined (lint)
1549/*
1550 * The following routines are jumped to from the mmu trap handlers to do
1551 * the setting up to call systrap.  They are separate routines instead of
1552 * being part of the handlers because the handlers would exceed 32
1553 * instructions and since this is part of the slow path the jump
1554 * cost is irrelevant.
1555 */
1556void
1557sfmmu_pagefault(void)
1558{
1559}
1560
1561void
1562sfmmu_mmu_trap(void)
1563{
1564}
1565
1566void
1567sfmmu_window_trap(void)
1568{
1569}
1570
1571void
1572sfmmu_kpm_exception(void)
1573{
1574}
1575
1576#else /* lint */
1577
1578#ifdef	PTL1_PANIC_DEBUG
1579	.seg	".data"
1580	.global	test_ptl1_panic
1581test_ptl1_panic:
1582	.word	0
1583	.align	8
1584
1585	.seg	".text"
1586	.align	4
1587#endif	/* PTL1_PANIC_DEBUG */
1588
1589
1590	ENTRY_NP(sfmmu_pagefault)
1591	SET_GL_REG(1)
1592	USE_ALTERNATE_GLOBALS(%g5)
1593	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
1594	rdpr	%tt, %g6
1595	cmp	%g6, FAST_IMMU_MISS_TT
1596	be,a,pn	%icc, 1f
1597	  mov	T_INSTR_MMU_MISS, %g3
1598	cmp	%g6, T_INSTR_MMU_MISS
1599	be,a,pn	%icc, 1f
1600	  mov	T_INSTR_MMU_MISS, %g3
1601	mov	%g5, %g2
1602	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
1603	cmp	%g6, FAST_DMMU_MISS_TT
1604	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1605	cmp	%g6, T_DATA_MMU_MISS
1606	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1607
1608#ifdef  PTL1_PANIC_DEBUG
1609	/* check if we want to test the tl1 panic */
1610	sethi	%hi(test_ptl1_panic), %g4
1611	ld	[%g4 + %lo(test_ptl1_panic)], %g1
1612	st	%g0, [%g4 + %lo(test_ptl1_panic)]
1613	cmp	%g1, %g0
1614	bne,a,pn %icc, ptl1_panic
1615	  or	%g0, PTL1_BAD_DEBUG, %g1
1616#endif	/* PTL1_PANIC_DEBUG */
16171:
1618	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
1619	/*
1620	 * g2 = tag access reg
1621	 * g3.l = type
1622	 * g3.h = 0
1623	 */
1624	sethi	%hi(trap), %g1
1625	or	%g1, %lo(trap), %g1
16262:
1627	ba,pt	%xcc, sys_trap
1628	  mov	-1, %g4
1629	SET_SIZE(sfmmu_pagefault)
1630
1631	ENTRY_NP(sfmmu_mmu_trap)
1632	SET_GL_REG(1)
1633	USE_ALTERNATE_GLOBALS(%g5)
1634	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
1635	rdpr	%tt, %g6
1636	cmp	%g6, FAST_IMMU_MISS_TT
1637	be,a,pn	%icc, 1f
1638	  mov	T_INSTR_MMU_MISS, %g3
1639	cmp	%g6, T_INSTR_MMU_MISS
1640	be,a,pn	%icc, 1f
1641	  mov	T_INSTR_MMU_MISS, %g3
1642	mov	%g5, %g2
1643	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
1644	cmp	%g6, FAST_DMMU_MISS_TT
1645	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1646	cmp	%g6, T_DATA_MMU_MISS
1647	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
16481:
1649	/*
1650	 * g2 = tag access reg
1651	 * g3 = type
1652	 */
1653	sethi	%hi(sfmmu_tsbmiss_exception), %g1
1654	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
1655	ba,pt	%xcc, sys_trap
1656	  mov	-1, %g4
1657	/*NOTREACHED*/
1658	SET_SIZE(sfmmu_mmu_trap)
1659
1660	ENTRY_NP(sfmmu_suspend_tl)
1661	SET_GL_REG(1)
1662	USE_ALTERNATE_GLOBALS(%g5)
1663	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
1664	rdpr	%tt, %g6
1665	cmp	%g6, FAST_IMMU_MISS_TT
1666	be,a,pn	%icc, 1f
1667	  mov	T_INSTR_MMU_MISS, %g3
1668	mov	%g5, %g2
1669	cmp	%g6, FAST_DMMU_MISS_TT
1670	move	%icc, T_DATA_MMU_MISS, %g3
1671	movne	%icc, T_DATA_PROT, %g3
16721:
1673	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
1674	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
1675	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
1676	ba,pt	%xcc, sys_trap
1677	  mov	PIL_15, %g4
1678	/*NOTREACHED*/
1679	SET_SIZE(sfmmu_suspend_tl)
1680
1681	/*
1682	 * No %g registers in use at this point.
1683	 */
1684	ENTRY_NP(sfmmu_window_trap)
1685	rdpr	%tpc, %g1
1686#ifdef sun4v
1687#ifdef DEBUG
1688	/* We assume previous %gl was 1 */
1689	rdpr	%tstate, %g4
1690	srlx	%g4, TSTATE_GL_SHIFT, %g4
1691	and	%g4, TSTATE_GL_MASK, %g4
1692	cmp	%g4, 1
1693	bne,a,pn %icc, ptl1_panic
1694	  mov	PTL1_BAD_WTRAP, %g1
1695#endif /* DEBUG */
1696	/* user miss at tl>1. better be the window handler or user_rtt */
1697	/* in user_rtt? */
1698	set	rtt_fill_start, %g4
1699	cmp	%g1, %g4
1700	blu,pn %xcc, 6f
1701	 .empty
1702	set	rtt_fill_end, %g4
1703	cmp	%g1, %g4
1704	bgeu,pn %xcc, 6f
1705	 nop
1706	set	fault_rtt_fn1, %g1
1707	wrpr	%g0, %g1, %tnpc
1708	ba,a	7f
17096:
1710	! must save this trap level before descending trap stack
1711	! no need to save %tnpc, either overwritten or discarded
1712	! already got it: rdpr	%tpc, %g1
1713	rdpr	%tstate, %g6
1714	rdpr	%tt, %g7
1715	! trap level saved, go get underlying trap type
1716	rdpr	%tl, %g5
1717	sub	%g5, 1, %g3
1718	wrpr	%g3, %tl
1719	rdpr	%tt, %g2
1720	wrpr	%g5, %tl
1721	! restore saved trap level
1722	wrpr	%g1, %tpc
1723	wrpr	%g6, %tstate
1724	wrpr	%g7, %tt
1725#else /* sun4v */
1726	/* user miss at tl>1. better be the window handler */
1727	rdpr	%tl, %g5
1728	sub	%g5, 1, %g3
1729	wrpr	%g3, %tl
1730	rdpr	%tt, %g2
1731	wrpr	%g5, %tl
1732#endif /* sun4v */
1733	and	%g2, WTRAP_TTMASK, %g4
1734	cmp	%g4, WTRAP_TYPE
1735	bne,pn	%xcc, 1f
1736	 nop
1737	/* tpc should be in the trap table */
1738	set	trap_table, %g4
1739	cmp	%g1, %g4
1740	blt,pn %xcc, 1f
1741	 .empty
1742	set	etrap_table, %g4
1743	cmp	%g1, %g4
1744	bge,pn %xcc, 1f
1745	 .empty
1746	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
1747	add	%g1, WTRAP_FAULTOFF, %g1
1748	wrpr	%g0, %g1, %tnpc
17497:
1750	/*
1751	 * some wbuf handlers will call systrap to resolve the fault
1752	 * we pass the trap type so they figure out the correct parameters.
1753	 * g5 = trap type, g6 = tag access reg
1754	 */
1755
1756	/*
1757	 * only use g5, g6, g7 registers after we have switched to alternate
1758	 * globals.
1759	 */
1760	SET_GL_REG(1)
1761	USE_ALTERNATE_GLOBALS(%g5)
1762	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
1763	rdpr	%tt, %g7
1764	cmp	%g7, FAST_IMMU_MISS_TT
1765	be,a,pn	%icc, ptl1_panic
1766	  mov	PTL1_BAD_WTRAP, %g1
1767	cmp	%g7, T_INSTR_MMU_MISS
1768	be,a,pn	%icc, ptl1_panic
1769	  mov	PTL1_BAD_WTRAP, %g1
1770	mov	T_DATA_PROT, %g5
1771	cmp	%g7, FAST_DMMU_MISS_TT
1772	move	%icc, T_DATA_MMU_MISS, %g5
1773	cmp	%g7, T_DATA_MMU_MISS
1774	move	%icc, T_DATA_MMU_MISS, %g5
1775	! XXXQ AGS re-check out this one
1776	done
17771:
1778	CPU_ADDR(%g1, %g4)
1779	ld	[%g1 + CPU_TL1_HDLR], %g4
1780	brnz,a,pt %g4, sfmmu_mmu_trap
1781	  st	%g0, [%g1 + CPU_TL1_HDLR]
1782	ba,pt	%icc, ptl1_panic
1783	  mov	PTL1_BAD_TRAP, %g1
1784	SET_SIZE(sfmmu_window_trap)
1785
1786	ENTRY_NP(sfmmu_kpm_exception)
1787	/*
1788	 * We have accessed an unmapped segkpm address or a legal segkpm
1789	 * address which is involved in a VAC alias conflict prevention.
1790	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
1791	 * set. If it is, we will instead note that a fault has occurred
1792	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
1793	 * a "retry"). This will step over the faulting instruction.
1794	 * Note that this means that a legal segkpm address involved in
1795	 * a VAC alias conflict prevention (a rare case to begin with)
1796	 * cannot be used in DTrace.
1797	 */
1798	CPU_INDEX(%g1, %g2)
1799	set	cpu_core, %g2
1800	sllx	%g1, CPU_CORE_SHIFT, %g1
1801	add	%g1, %g2, %g1
1802	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
1803	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
1804	bz	0f
1805	or	%g2, CPU_DTRACE_BADADDR, %g2
1806	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
1807	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
1808	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
1809	done
18100:
1811	TSTAT_CHECK_TL1(1f, %g1, %g2)
18121:
1813	SET_GL_REG(1)
1814	USE_ALTERNATE_GLOBALS(%g5)
1815	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
1816	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1817	/*
1818	 * g2=tagacc g3.l=type g3.h=0
1819	 */
1820	sethi	%hi(trap), %g1
1821	or	%g1, %lo(trap), %g1
1822	ba,pt	%xcc, sys_trap
1823	mov	-1, %g4
1824	SET_SIZE(sfmmu_kpm_exception)
1825
1826#endif /* lint */
1827
1828#if defined (lint)
1829
1830void
1831sfmmu_tsb_miss(void)
1832{
1833}
1834
1835void
1836sfmmu_kpm_dtsb_miss(void)
1837{
1838}
1839
1840void
1841sfmmu_kpm_dtsb_miss_small(void)
1842{
1843}
1844
1845#else /* lint */
1846
1847
1848#if (CTX_SIZE != (1 << CTX_SZ_SHIFT))
1849#error - size of context struct does not match with CTX_SZ_SHIFT
1850#endif
1851
1852#if (IMAP_SEG != 0)
1853#error - ism_map->ism_seg offset is not zero
1854#endif
1855
1856/*
1857 * Copies the ism mapping for this ctx into param "ismseg" if this is an ISM
1858 * tlb miss and branches to label "ismhit". If this is not an ISM
1859 * process or an ISM tlb miss, it falls through.
1860 *
1861 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
1862 * this process.
1863 * If so, it will branch to label "ismhit".  If not, it will fall through.
1864 *
1865 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
1866 * so that any other threads of this process will not try and walk the ism
1867 * maps while they are being changed.
1868 *
1869 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
1870 *       will make sure of that. This means we can terminate our search on
1871 *       the first zero mapping we find.
1872 *
1873 * Parameters:
1874 * tagacc	= tag access register (vaddr + ctx) (in)
1875 * tsbmiss	= address of tsb miss area (in)
1876 * ismseg	= contents of ism_seg for this ism map (out)
1877 * ismhat	= physical address of imap_ismhat for this ism map (out)
1878 * tmp1		= scratch reg (CLOBBERED)
1879 * tmp2		= scratch reg (CLOBBERED)
1880 * tmp3		= scratch reg (CLOBBERED)
1881 * label:    temporary labels
1882 * ismhit:   label where to jump to if an ism dtlb miss
1883 * exitlabel:label where to jump if hat is busy due to hat_unshare.
1884 */
1885#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3, \
1886	label, ismhit)							\
1887	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
1888	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
1889	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
1890label/**/1:								;\
1891	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
1892	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
1893label/**/2:								;\
1894	brz,pt  ismseg, label/**/3		/* no mapping */	;\
1895	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
1896	lduha	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
1897	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
1898	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
1899	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
1900	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
1901	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
1902	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
1903	cmp	tmp2, tmp1		 	/* check va <= offset*/	;\
1904	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
1905	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
1906									;\
1907	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
1908	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
1909	cmp	ismhat, tmp1						;\
1910	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
1911	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
1912									;\
1913	add	tmp3, IBLK_NEXTPA, tmp1					;\
1914	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
1915	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
1916	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
1917label/**/3:
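/*
 * For reference, a rough C sketch of the walk ISM_CHECK performs.  The
 * ism_blk_t/ism_map_t field names below only approximate the structures
 * behind the IBLK_ and IMAP_ offsets, and the ASI_MEM loads are shown as
 * ordinary dereferences:
 *
 *	for (pa = tsbmiss->ismblkpa; pa != -1; pa = blk->iblk_nextpa) {
 *		blk = (ism_blk_t *)pa;
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			map = &blk->iblk_maps[i];
 *			if (map->imap_seg == 0)
 *				goto notism;	(no holes: stop searching)
 *			sh = map->imap_vb_shift;
 *			if ((tagacc >> sh) - (map->imap_seg >> sh) <
 *			    (map->imap_seg & map->imap_sz_mask))
 *				goto ismhit;	(ismseg/ismhat now valid)
 *		}
 *	}
 * notism:	fall through, not an ISM miss
 */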
1918
1919/*
1920 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
1921 * It also returns the virtual pg for vaddr (i.e. vaddr >> hmeshift).
1922 * Parameters:
1923 * tagacc = reg containing the tag access register (vaddr + ctx)
1924 * hatid = reg containing sfmmu pointer
1925 * hmeshift = constant/register to shift vaddr to obtain vapg
1926 * hmebp = register where bucket pointer will be stored
1927 * vapg = register where virtual page will be stored
1928 * tsbarea, label, tmp1, tmp2 = tsbmiss area ptr, local label, tmp regs
1929 */
1930
1931
1932#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
1933	vapg, label, tmp1, tmp2)					\
1934	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
1935	brnz,a,pt tmp1, label/**/1					;\
1936	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
1937	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
1938	ba,pt	%xcc, label/**/2					;\
1939	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
1940label/**/1:								;\
1941	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
1942label/**/2:								;\
1943	srlx	tagacc, hmeshift, vapg					;\
1944	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
1945	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
1946	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
1947	add	hmebp, tmp1, hmebp
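/*
 * In C terms the bucket selection above is roughly the following sketch
 * (illustrative field names standing in for the TSBMISS_[UK]HASHSZ and
 * TSBMISS_[UK]HASHSTART offsets; compare HME_HASH_FUNCTION() in
 * hat_sfmmu.h):
 *
 *	mask = (ctx != 0) ? tsbmiss->uhash_sz : tsbmiss->khash_sz;
 *	base = (ctx != 0) ? tsbmiss->uhash_start : tsbmiss->khash_start;
 *	vapg = tagacc >> hmeshift;
 *	hmebp = base + (((uintptr_t)hatid ^ vapg) & mask) * HMEBUCK_SIZE;
 */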
1948
1949/*
1950 * hashtag includes bspage + hashno (64 bits).
1951 */
1952
1953#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
1954	sllx	vapg, hmeshift, vapg					;\
1955	or	vapg, hashno, hblktag
1956
1957/*
1958 * Function to traverse hmeblk hash link list and find corresponding match.
1959 * The search is done using physical pointers. It returns the physical address
1960 * and virtual address pointers to the hmeblk that matches the tag
1961 * provided.
1962 * Parameters:
1963 * hmebp	= register that points to hme hash bucket, also used as
1964 *		  tmp reg (clobbered)
1965 * hmeblktag	= register with hmeblk tag match
1966 * hatid	= register with hatid
1967 * hmeblkpa	= register where physical ptr will be stored
1968 * hmeblkva	= register where virtual ptr will be stored
1969 * tmp1		= tmp reg
1970 * label: temporary label
1971 */
1972
1973#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, hmeblkva,	\
1974	tsbarea, tmp1, label)					 	\
1975	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
1976	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
1977	add     hmebp, HMEBUCK_HBLK, hmeblkva				;\
1978	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
1979	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
1980label/**/1:								;\
1981	brz,pn	hmeblkva, label/**/2					;\
1982	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
1983	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
1984	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
1985	add	hmebp, CLONGSIZE, hmebp					;\
1986	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
1987	xor	tmp1, hmeblktag, tmp1					;\
1988	xor	hmebp, hatid, hmebp					;\
1989	or	hmebp, tmp1, hmebp					;\
1990	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
1991	  add	hmeblkpa, HMEBLK_NEXT, hmebp				;\
1992	ldna	[hmebp]ASI_MEM, hmeblkva	/* hmeblk ptr va */	;\
1993	add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
1994	ba,pt	%xcc, label/**/1					;\
1995	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
1996label/**/2:
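/*
 * Sketch of the chain walk above in C-like terms (physical-pointer loads
 * shown as plain dereferences; a hit requires both 64-bit tag words,
 * bspage+hashno and hatid, to match; field names are approximations of
 * the HMEBUCK_/HMEBLK_ offsets):
 *
 *	hmeblkpa = hmebp->hmeh_nextpa;		(HMEBUCK_NEXTPA)
 *	hmeblkva = hmebp->hmeh_listp;		(HMEBUCK_HBLK)
 *	while (hmeblkva != NULL) {
 *		if (hmeblk->hblk_tag_word0 == hmeblktag &&
 *		    hmeblk->hblk_tag_word1 == (uint64_t)hatid)
 *			break;			(match found)
 *		hmeblkva = hmeblk->hblk_next;	(HMEBLK_NEXT)
 *		hmeblkpa = hmeblk->hblk_nextpa;	(HMEBLK_NEXTPA)
 *	}
 */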
1997
1998
1999#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2000#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2001#endif
2002
2003/*
2004 * HMEBLK_TO_HMENT is a macro that given an hmeblk and a vaddr returns
2005 * the offset for the corresponding hment.
2006 * Parameters:
2007 * vaddr = register with virtual address
2008 * hmeblkpa = physical pointer to hme_blk
2009 * hment = register where address of hment will be stored
2010 * tmp1 = scratch register (clobbered)
2011 * label1 = temporary label
2012 */
2013#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, tmp1, label1)	\
2014	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2015	lda	[hmentoff]ASI_MEM, tmp1 				;\
2016	andcc	tmp1, HBLK_SZMASK, %g0	 /* tmp1 = get_hblk_sz(%g5) */	;\
2017	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2018	  or	%g0, HMEBLK_HME1, hmentoff				;\
2019	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2020	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2021	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2022	add	tmp1, HMEBLK_HME1, hmentoff				;\
2023label1:
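/*
 * Equivalently, in rough C terms (using the names from the macro above):
 *
 *	if (get_hblk_sz(hmeblkp) != TTE8K)
 *		hmentoff = HMEBLK_HME1;		(single hment, large page)
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    (((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) << SFHME_SHIFT);
 */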
2024
2025/*
2026 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2027 *
2028 * tagacc	= tag access register (vaddr + ctx) (in)
2029 * hatid	= sfmmu pointer for TSB miss (in)
2030 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2031 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2032 * hmeblkva	= VA of hment if found, otherwise clobbered (out)
2033 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2034 * hmentoff	= temporarily stores hment offset (clobbered)
2035 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2036 *		  for this page size.
2037 * hashno	= constant/register hash number
2038 * label	= temporary label for branching within macro.
2039 * foundlabel	= label to jump to when tte is found.
2040 * suspendlabel= label to jump to when tte is suspended.
2041 * exitlabel	= label to jump to when tte is not found.  The hmebp lock
2042 *		  has already been dropped by this time.
2043 *
2044 * The caller should set up the tsbmiss->scratch[2] field correctly before
2045 * calling this function (aka TSBMISS_SCRATCH + TSBMISS_HATID)
2046 */
2047#define GET_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea, hmentoff, \
2048		hmeshift, hashno, label, foundlabel, suspendlabel, exitlabel) \
2049									;\
2050	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2051	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2052	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2053		hmeblkpa, label/**/5, hmentoff, hmeblkva)		;\
2054									;\
2055	/*								;\
2056	 * tagacc = tagacc						;\
2057	 * hatid = hatid						;\
2058	 * tsbarea = tsbarea						;\
2059	 * tte   = hmebp (hme bucket pointer)				;\
2060	 * hmeblkpa  = vapg  (virtual page)				;\
2061	 * hmentoff, hmeblkva = scratch					;\
2062	 */								;\
2063	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmentoff)	;\
2064									;\
2065	/*								;\
2066	 * tagacc = tagacc						;\
2067	 * hatid = hatid						;\
2068	 * tte   = hmebp						;\
2069	 * hmeblkpa  = CLOBBERED					;\
2070	 * hmentoff  = htag_bspage & hashno				;\
2071	 * hmeblkva  = scratch						;\
2072	 */								;\
2073	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2074	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
2075	HMEHASH_SEARCH(tte, hmentoff, hatid, hmeblkpa, hmeblkva, 	\
2076		tsbarea, tagacc, label/**/1)				;\
2077	/*								;\
2078	 * tagacc = CLOBBERED						;\
2079	 * tte = CLOBBERED						;\
2080	 * hmeblkpa = hmeblkpa						;\
2081	 * hmeblkva = hmeblkva						;\
2082	 */								;\
2083	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2084	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2085	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2086	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2087	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2088	  nop								;\
2089label/**/4:								;\
2090	/*								;\
2091	 * We have found the hmeblk containing the hment.		;\
2092	 * Now we calculate the corresponding tte.			;\
2093	 *								;\
2094	 * tagacc = tagacc						;\
2095	 * hatid = clobbered						;\
2096	 * tte   = hmebp						;\
2097	 * hmeblkpa  = hmeblkpa						;\
2098	 * hmentoff  = hblktag						;\
2099	 * hmeblkva  = hmeblkva 					;\
2100	 */								;\
2101	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hmentoff, hatid, label/**/2)	;\
2102									;\
2103	add	hmentoff, SFHME_TTE, hmentoff				;\
2104	add	hmeblkpa, hmentoff, hmeblkpa				;\
2105	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2106	add	hmeblkva, hmentoff, hmeblkva				;\
2107	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2108	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmentoff ;\
2109	HMELOCK_EXIT(hmentoff, hmentoff, ASI_MEM)	/* drop lock */	;\
2110	set	TTE_SUSPEND, hmentoff					;\
2111	TTE_SUSPEND_INT_SHIFT(hmentoff)					;\
2112	btst	tte, hmentoff						;\
2113	bz,pt	%xcc, foundlabel					;\
2114	 nop								;\
2115									;\
2116	/*								;\
2117	 * Mapping is suspended, so goto suspend label.			;\
2118	 */								;\
2119	ba,pt	%xcc, suspendlabel					;\
2120	  nop
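/*
 * For orientation, the overall GET_TTE flow is roughly the following
 * C-style sketch (lock/ASI details omitted; note the hmebp lock is
 * dropped on both the found and the not-found paths):
 *
 *	hmebp = HMEHASH_FUNC_ASM(tagacc, hatid, hmeshift);
 *	hblktag = MAKE_HASHTAG(tagacc >> hmeshift, hashno);
 *	HMELOCK_ENTER(hmebp);
 *	hmeblk = HMEHASH_SEARCH(hmebp, hblktag, hatid);
 *	if (hmeblk == NULL) {
 *		HMELOCK_EXIT(hmebp);
 *		goto exitlabel;
 *	}
 *	sfhmep = (struct sf_hment *)((caddr_t)hmeblk +
 *	    HMEBLK_TO_HMENT(tagacc, hmeblk));
 *	tte = sfhmep->hme_tte;		(loaded through the physical ptr)
 *	HMELOCK_EXIT(hmebp);
 *	if (tte & TTE_SUSPEND)
 *		goto suspendlabel;
 *	goto foundlabel;
 */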
2121
2122	/*
2123	 * KERNEL PROTECTION HANDLER
2124	 *
2125	 * g1 = tsb8k pointer register (clobbered)
2126	 * g2 = tag access register (ro)
2127	 * g3 - g7 = scratch registers
2128	 *
2129	 * Note: This function is patched at runtime for performance reasons.
2130 * 	 Any changes here require sfmmu_patch_ktsb to be fixed as well.
2131	 */
2132	ENTRY_NP(sfmmu_kprot_trap)
2133	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2134sfmmu_kprot_patch_ktsb_base:
2135	RUNTIME_PATCH_SETX(%g1, %g6)
2136	/* %g1 = contents of ktsb_base or ktsb_pbase */
2137sfmmu_kprot_patch_ktsb_szcode:
2138	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2139
2140	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2141	! %g1 = First TSB entry pointer, as TSB miss handler expects
2142
2143	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2144sfmmu_kprot_patch_ktsb4m_base:
2145	RUNTIME_PATCH_SETX(%g3, %g6)
2146	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2147sfmmu_kprot_patch_ktsb4m_szcode:
2148	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2149
2150	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2151	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2152
2153	CPU_TSBMISS_AREA(%g6, %g7)
2154	HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2155	ba,pt	%xcc, sfmmu_tsb_miss_tt
2156	  nop
2157
2158	/*
2159	 * USER PROTECTION HANDLER
2160	 *
2161	 * g1 = tsb8k pointer register (ro)
2162	 * g2 = tag access register (ro)
2163	 * g3 = faulting context (clobbered, currently not used)
2164	 * g4 - g7 = scratch registers
2165	 */
2166	ALTENTRY(sfmmu_uprot_trap)
2167#ifdef sun4v
2168	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2169	/* %g1 = first TSB entry ptr now, %g2 preserved */
2170
2171	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2172	brlz,pt %g3, 9f			/* check for 2nd TSB */
2173	  mov	%g0, %g3		/* clear second tsbe ptr */
2174
2175	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2176	/* %g3 = second TSB entry ptr now, %g2 preserved */
2177
2178#else /* sun4v */
2179
2180	brgez,pt %g1, 9f		/* check for 2nd TSB */
2181	  mov	%g0, %g3		/* clear second tsbe ptr */
2182
2183	mov	%g2, %g7
2184	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2185	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2186	mov	%g1, %g7
2187	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2188
2189#endif /* sun4v */
21909:
2191	CPU_TSBMISS_AREA(%g6, %g7)
2192	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2193	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2194	  nop
2195
2196	/*
2197	 * Kernel 8K page iTLB miss.  We also get here if we took a
2198	 * fast instruction access mmu miss trap while running in
2199	 * invalid context.
2200	 *
2201	 * %g1 = 8K TSB pointer register (not used, clobbered)
2202	 * %g2 = tag access register (used)
2203	 * %g3 = faulting context id (used)
2204	 * %g7 = 4M virtual page number for tag matching  (used)
2205	 */
2206	.align	64
2207	ALTENTRY(sfmmu_kitlb_miss)
2208	brnz,pn %g3, tsb_tl0_noctxt
2209	  nop
2210
2211	/* kernel miss */
2212	/* get kernel tsb pointer */
2213	/* we patch the next set of instructions at run time */
2214	/* NOTE: any changes here require sfmmu_patch_ktsb to be fixed as well */
2215iktsbbase:
2216	RUNTIME_PATCH_SETX(%g4, %g5)
2217	/* %g4 = contents of ktsb_base or ktsb_pbase */
2218
2219iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2220	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2221	or	%g4, %g1, %g1			! form tsb ptr
2222	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2223	cmp	%g4, %g7
2224	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2225	  andcc %g5, TTE_EXECPRM_INT, %g0	! check exec bit
2226	bz,pn	%icc, exec_fault
2227	  nop
2228	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2229	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2230	retry
2231
2232	/*
2233	 * Kernel dTLB miss.  We also get here if we took a fast data
2234	 * access mmu miss trap while running in invalid context.
2235	 *
2236	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2237	 *	We select the TSB miss handler to branch to depending on
2238	 *	the virtual address of the access.  In the future it may
2239	 *	be desirable to separate kpm TTEs into their own TSB,
2240	 *	in which case all that needs to be done is to set
2241	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2242	 *	early in the miss if we detect a kpm VA to a new handler.
2243	 *
2244	 * %g1 = 8K TSB pointer register (not used, clobbered)
2245	 * %g2 = tag access register (used)
2246	 * %g3 = faulting context id (used)
2247	 */
2248	.align	64
2249	ALTENTRY(sfmmu_kdtlb_miss)
2250	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2251	  nop
2252
2253	/* Gather some stats for kpm misses in the TLB. */
2254	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2255	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2256
2257	/*
2258	 * Get first TSB offset and look for 8K/64K/512K mapping
2259	 * using the 8K virtual page as the index.
2260	 *
2261	 * We patch the next set of instructions at run time;
2262	 * any changes here require sfmmu_patch_ktsb changes too.
2263	 */
2264dktsbbase:
2265	RUNTIME_PATCH_SETX(%g7, %g6)
2266	/* %g7 = contents of ktsb_base or ktsb_pbase */
2267
2268dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2269	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2270
2271	/*
2272	 * At this point %g1 is our index into the TSB.
2273	 * We just masked off enough bits of the VA depending
2274	 * on our TSB size code.
2275	 */
2276	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2277	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2278	cmp	%g6, %g4			! compare tag
2279	bne,pn	%xcc, dktsb4m_kpmcheck_small
2280	  add	%g7, %g1, %g1			/* form tsb ptr */
2281	TT_TRACE(trace_tsbhit)
2282	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2283	/* trapstat expects tte in %g5 */
2284	retry
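	/*
	 * The patched sllx/srlx pair above implements a direct-mapped,
	 * virtually indexed TSB probe; in rough C terms:
	 *
	 *	idx  = (tagacc >> MMU_PAGESHIFT) & (ktsb_entries - 1);
	 *	tsbe = ktsb_base + (idx << TSB_ENTRY_SHIFT);
	 *	if (tag(tsbe) == (tagacc >> TAG_VALO_SHIFT))
	 *		stuff data(tsbe) into the TLB and retry;
	 *
	 * ktsb_entries (the szcode), ktsb_base and the ASI used by the
	 * ldda are all hot-patched at boot by sfmmu_patch_ktsb.
	 */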
2285
2286	/*
2287	 * If kpm is using large pages, the following instruction needs
2288	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2289	 * so that we will probe the 4M TSB regardless of the VA.  In
2290	 * the case kpm is using small pages, we know no large kernel
2291	 * mappings are located above 0x80000000.00000000 so we skip the
2292	 * probe as an optimization.
2293	 */
2294dktsb4m_kpmcheck_small:
2295	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2296	  /* delay slot safe, below */
2297
2298	/*
2299	 * Get second TSB offset and look for 4M mapping
2300	 * using 4M virtual page as the TSB index.
2301	 *
2302	 * Here:
2303	 * %g1 = 8K TSB pointer.  Don't squash it.
2304	 * %g2 = tag access register (we still need it)
2305	 */
2306	srlx	%g2, MMU_PAGESHIFT4M, %g3
2307
2308	/*
2309	 * We patch the next set of instructions at run time;
2310	 * any changes here require sfmmu_patch_ktsb changes too.
2311	 */
2312dktsb4mbase:
2313	RUNTIME_PATCH_SETX(%g7, %g6)
2314	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2315dktsb4m:
2316	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2317	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2318
2319	/*
2320	 * At this point %g3 is our index into the TSB.
2321	 * We just masked off enough bits of the VA depending
2322	 * on our TSB size code.
2323	 */
2324	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2325	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2326	cmp	%g6, %g4			! compare tag
2327
2328dktsb4m_tsbmiss:
2329	bne,pn	%xcc, dktsb4m_kpmcheck
2330	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2331	TT_TRACE(trace_tsbhit)
2332	/* we don't check TTE size here since we assume 4M TSB is separate */
2333	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2334	/* trapstat expects tte in %g5 */
2335	retry
2336
2337	/*
2338	 * So, we failed to find a valid TTE to match the faulting
2339	 * address in either TSB.  There are a few cases that could land
2340	 * us here:
2341	 *
2342	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2343	 *    to sfmmu_tsb_miss_tt to handle the miss.
2344	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2345	 *    4M TSB.  Let segkpm handle it.
2346	 *
2347	 * Note that we shouldn't land here in the case of a kpm VA when
2348	 * kpm_smallpages is active -- we handled that case earlier at
2349	 * dktsb4m_kpmcheck_small.
2350	 *
2351	 * At this point:
2352	 *  g1 = 8K-indexed primary TSB pointer
2353	 *  g2 = tag access register
2354	 *  g3 = 4M-indexed secondary TSB pointer
2355	 */
2356dktsb4m_kpmcheck:
2357	cmp	%g2, %g0
2358	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2359	  nop
2360	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2361	  nop
2362
2363#ifdef sun4v
2364	/*
2365	 * User instruction miss w/ single TSB.
2366	 * The first probe covers 8K, 64K, and 512K page sizes,
2367	 * because 64K and 512K mappings are replicated off 8K
2368	 * pointer.
2369	 *
2370	 * g1 = tsb8k pointer register
2371	 * g2 = tag access register
2372	 * g3 - g6 = scratch registers
2373	 * g7 = TSB tag to match
2374	 */
2375	.align	64
2376	ALTENTRY(sfmmu_uitlb_fastpath)
2377
2378	SETUP_UTSB_ATOMIC_ASI(%g4, %g5)
2379	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2380	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2381	ba,pn	%xcc, sfmmu_tsb_miss_tt
2382	  mov	%g0, %g3
2383
2384	/*
2385	 * User data miss w/ single TSB.
2386	 * The first probe covers 8K, 64K, and 512K page sizes,
2387	 * because 64K and 512K mappings are replicated off 8K
2388	 * pointer.
2389	 *
2390	 * g1 = tsb8k pointer register
2391	 * g2 = tag access register
2392	 * g3 - g6 = scratch registers
2393	 * g7 = TSB tag to match
2394	 */
2395	.align 64
2396	ALTENTRY(sfmmu_udtlb_fastpath)
2397
2398	SETUP_UTSB_ATOMIC_ASI(%g4, %g6)
2399	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2400	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2401	ba,pn	%xcc, sfmmu_tsb_miss_tt
2402	  mov	%g0, %g3
2403
2404#endif /* sun4v */
2405
2406	/*
2407	 * User instruction miss w/ multiple TSBs.
2408	 * The first probe covers 8K, 64K, and 512K page sizes,
2409	 * because 64K and 512K mappings are replicated off 8K
2410	 * pointer.  Second probe covers 4M page size only.
2411	 *
2412	 * Just like sfmmu_udtlb_slowpath, except:
2413	 *   o Uses ASI_ITLB_IN
2414	 *   o checks for execute permission
2415	 *   o No ISM prediction.
2416	 *
2417	 * g1 = tsb8k pointer register
2418	 * g2 = tag access register
2419	 * g3 - g6 = scratch registers
2420	 * g7 = TSB tag to match
2421	 */
2422	.align	64
2423	ALTENTRY(sfmmu_uitlb_slowpath)
2424
2425#ifdef sun4v
2426	SETUP_UTSB_ATOMIC_ASI(%g4, %g5)
2427	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2428
2429	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2430	/* g4 - g5 = clobbered here */
2431
2432	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2433	/* g1 = first TSB pointer, g3 = second TSB pointer */
2434	srlx	%g2, TAG_VALO_SHIFT, %g7
2435	PROBE_2ND_ITSB(%g3, %g7)
2436	/* NOT REACHED */
2437#else /* sun4v */
2438	mov	%g1, %g3	/* save tsb8k reg in %g3 */
2439	SETUP_UTSB_ATOMIC_ASI(%g4, %g5)
2440	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
2441
2442	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2443	/* g4 - g5 = clobbered here */
2444
2445	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
2446	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
2447	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
2448	/* g1 = first TSB pointer, g3 = second TSB pointer */
2449	srlx	%g2, TAG_VALO_SHIFT, %g7
2450	PROBE_2ND_ITSB(%g3, %g7, isynth)
2451	/* NOT REACHED */
2452#endif /* sun4v */
2453
2454	/*
2455	 * User data miss w/ multiple TSBs.
2456	 * The first probe covers 8K, 64K, and 512K page sizes,
2457	 * because 64K and 512K mappings are replicated off 8K
2458	 * pointer.  Second probe covers 4M page size only.
2459	 *
2460	 * We consider probing for 4M pages first if the VA falls
2461	 * in a range that's likely to be ISM.
2462	 *
2463	 * g1 = tsb8k pointer register
2464	 * g2 = tag access register
2465	 * g3 - g6 = scratch registers
2466	 * g7 = TSB tag to match
2467	 */
2468	.align 64
2469	ALTENTRY(sfmmu_udtlb_slowpath)
2470
2471	SETUP_UTSB_ATOMIC_ASI(%g4, %g6)
2472
2473	/*
2474	 * Check for ISM.  If it exists, look for 4M mappings in the second TSB
2475	 * first, then probe for other mappings in the first TSB if that fails.
2476	 */
2477	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
2478	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
2479	  mov	%g1, %g3
2480
2481udtlb_miss_probefirst:
2482	/*
2483	 * g1 = 8K TSB pointer register
2484	 * g2 = tag access register
2485	 * g3 = (potentially) second TSB entry ptr
2486	 * g6 = ism pred.
2487	 * g7 = vpg_4m
2488	 */
2489#ifdef sun4v
2490	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2491	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2492
2493	/*
2494	 * Here:
2495	 *   g1 = first TSB pointer
2496	 *   g2 = tag access reg
2497	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2498	 */
2499	brgz,pn	%g6, sfmmu_tsb_miss_tt
2500	  nop
2501#else /* sun4v */
2502	mov	%g1, %g4
2503	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
2504	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2505
2506	/*
2507	 * Here:
2508	 *   g1 = first TSB pointer
2509	 *   g2 = tag access reg
2510	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2511	 */
2512	brgz,pn	%g6, sfmmu_tsb_miss_tt
2513	  nop
2514	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
2515	/* fall through in 8K->4M probe order */
2516#endif /* sun4v */
2517
2518udtlb_miss_probesecond:
2519	/*
2520	 * Look in the second TSB for the TTE
2521	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
2522	 * g2 = tag access reg
2523	 * g3 = 8K TSB pointer register
2524	 * g6 = ism pred.
2525	 * g7 = vpg_4m
2526	 */
2527#ifdef sun4v
2528	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
2529	/* tagacc (%g2) not destroyed */
2530	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2531	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
2532#else
2533	mov	%g3, %g7
2534	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
2535	/* %g2 clobbered, %g3 =second tsbe ptr */
2536	mov	MMU_TAG_ACCESS, %g2
2537	ldxa	[%g2]ASI_DMMU, %g2
2538#endif
2539
2540	srlx	%g2, TAG_VALO_SHIFT, %g7
2541	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2542	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
2543	brgz,pn	%g6, udtlb_miss_probefirst
2544	  nop
2545
2546	/* fall through to sfmmu_tsb_miss_tt */
2547
2548	ALTENTRY(sfmmu_tsb_miss_tt)
2549	TT_TRACE(trace_tsbmiss)
2550	/*
2551	 * We get here if there is a TSB miss OR a write protect trap.
2552	 *
2553	 * g1 = First TSB entry pointer
2554	 * g2 = tag access register
2555	 * g3 = 4M TSB entry pointer; NULL if no 2nd TSB
2556	 * g4 - g7 = scratch registers
2557	 */
2558
2559	ALTENTRY(sfmmu_tsb_miss)
2560
2561	/*
2562	 * If trapstat is running, we need to shift the %tpc and %tnpc to
2563	 * point to trapstat's TSB miss return code (note that trapstat
2564	 * itself will patch the correct offset to add).
2565	 */
2566	rdpr	%tl, %g7
2567	cmp	%g7, 1
2568	ble,pt	%xcc, 0f
2569	  sethi	%hi(KERNELBASE), %g6
2570	rdpr	%tpc, %g7
2571	or	%g6, %lo(KERNELBASE), %g6
2572	cmp	%g7, %g6
2573	bgeu,pt	%xcc, 0f
2574	/* delay slot safe */
2575
2576	ALTENTRY(tsbmiss_trapstat_patch_point)
2577	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
2578	wrpr	%g7, %tpc
2579	add	%g7, 4, %g7
2580	wrpr	%g7, %tnpc
25810:
2582	CPU_TSBMISS_AREA(%g6, %g7)
2583
2584	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save first tsb pointer */
2585	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save second tsb pointer */
2586
2587	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
2588	brz,a,pn %g3, 1f			/* skip ahead if kernel */
2589	  ldn	[%g6 + TSBMISS_KHATID], %g7
2590	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
2591	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
2592
2593	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
2594
2595	cmp	%g3, INVALID_CONTEXT
2596	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
2597	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
2598
2599	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
2600	/*
2601	 * The miss wasn't in an ISM segment.
2602	 *
2603	 * %g1 %g3, %g4, %g5, %g7 all clobbered
2604	 * %g2 = tag access (vaddr + ctx)
2605	 */
2606
2607	ba,pt	%icc, 2f
2608	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
2609
26101:
2611	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
2612	/*
2613	 * 8K and 64K hash.
2614	 */
26152:
2616
2617	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2618		MMU_PAGESHIFT64K, TTE64K, tsb_l8K, tsb_checktte,
2619		sfmmu_suspend_tl, tsb_512K)
2620	/* NOT REACHED */
2621
2622tsb_512K:
2623	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2624	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2625	brz,pn	%g5, 3f
2626	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2627	and	%g4, HAT_512K_FLAG, %g5
2628
2629	/*
2630	 * Note that there is a small window here where we may have
2631	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
2632	 * flag yet, so we will skip searching the 512k hash list.
2633	 * In this case we will end up in pagefault which will find
2634	 * the mapping and return.  So, in this instance we will end up
2635	 * spending a bit more time resolving this TSB miss, but it can
2636	 * only happen once per process and even then, the chances of that
2637	 * are very small, so it's not worth the extra overhead it would
2638	 * take to close this window.
2639	 */
2640	brz,pn	%g5, tsb_4M
2641	  nop
26423:
2643	/*
2644	 * 512K hash
2645	 */
2646
2647	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2648		MMU_PAGESHIFT512K, TTE512K, tsb_l512K, tsb_checktte,
2649		sfmmu_suspend_tl, tsb_4M)
2650	/* NOT REACHED */
2651
2652tsb_4M:
2653	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2654	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2655	brz,pn	%g5, 4f
2656	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2657	and	%g4, HAT_4M_FLAG, %g5
2658	brz,pn	%g5, tsb_32M
2659	  nop
26604:
2661	/*
2662	 * 4M hash
2663	 */
2664
2665	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2666		MMU_PAGESHIFT4M, TTE4M, tsb_l4M, tsb_checktte,
2667		sfmmu_suspend_tl, tsb_32M)
2668	/* NOT REACHED */
2669
2670tsb_32M:
2671#ifndef sun4v
2672	GET_CPU_IMPL(%g5)
2673	cmp	%g5, PANTHER_IMPL
2674	bne,pt	%xcc, tsb_pagefault
2675	  nop
2676#endif
2677
2678	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2679	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2680#ifdef sun4v
2681	brz,pn	%g5, 6f
2682#else
2683	brz,pn	%g5, tsb_pagefault
2684#endif
2685	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2686	and	%g4, HAT_32M_FLAG, %g5
2687	brz,pn	%g5, tsb_256M
2688	  nop
26895:
2690	/*
2691	 * 32M hash
2692	 */
2693
2694	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2695		MMU_PAGESHIFT32M, TTE32M, tsb_l32M, tsb_checktte,
2696		sfmmu_suspend_tl, tsb_256M)
2697	/* NOT REACHED */
2698
2699tsb_256M:
2700	lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2701	and	%g4, HAT_256M_FLAG, %g5
2702	brz,pn	%g5, tsb_pagefault
2703	  nop
27046:
2705	/*
2706	 * 256M hash
2707	 */
2708
2709	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2710	    MMU_PAGESHIFT256M, TTE256M, tsb_l256M, tsb_checktte,
2711	    sfmmu_suspend_tl, tsb_pagefault)
2712	/* NOT REACHED */
2713
2714tsb_checktte:
2715	/*
2716	 * g3 = tte
2717	 * g4 = tte pa
2718	 * g5 = tte va
2719	 * g6 = tsbmiss area
2720	 */
2721	brgez,pn %g3, tsb_pagefault	/* if tte invalid branch */
2722	  nop
2723
2724tsb_validtte:
2725	/*
2726	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
2727	 */
2728	rdpr	%tt, %g7
2729	cmp	%g7, FAST_PROT_TT
2730	bne,pt	%icc, 4f
2731	  nop
2732
2733	TTE_SET_REFMOD_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_refmod,
2734	    tsb_protfault)
2735
2736	rdpr	%tt, %g5
2737	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
2738	ba,pt	%xcc, tsb_update_tl1
2739	  nop
2740
27414:
2742	/*
2743	 * If ITLB miss check exec bit.
2744	 * If not set treat as invalid TTE.
2745	 */
2746	cmp     %g7, T_INSTR_MMU_MISS
2747	be,pn	%icc, 5f
2748	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
2749	cmp     %g7, FAST_IMMU_MISS_TT
2750	bne,pt %icc, 3f
2751	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
27525:
2753	bz,pn %icc, tsb_protfault
2754	  nop
2755
27563:
2757	/*
2758	 * Set reference bit if not already set
2759	 */
2760	TTE_SET_REF_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_ref)
2761
2762	/*
2763	 * Now, load into TSB/TLB.  At this point:
2764	 * g3 = tte
2765	 * g4 = patte
2766	 * g6 = tsbmiss area
2767	 */
2768	rdpr	%tt, %g5
2769#ifdef sun4v
2770	MMU_FAULT_STATUS_AREA(%g2)
2771	cmp	%g5, T_INSTR_MMU_MISS
2772	be,a,pt	%icc, 9f
2773	  nop
2774	cmp	%g5, FAST_IMMU_MISS_TT
2775	be,a,pt	%icc, 9f
2776	  nop
2777	add	%g2, MMFSA_D_, %g2
27789:
2779	ldx	[%g2 + MMFSA_CTX_], %g7
2780	sllx	%g7, TTARGET_CTX_SHIFT, %g7
2781	ldx	[%g2 + MMFSA_ADDR_], %g2
2782	srlx	%g2, TTARGET_VA_SHIFT, %g2
2783	or	%g2, %g7, %g2
2784#else
2785	cmp	%g5, FAST_IMMU_MISS_TT
2786	be,a,pt	%icc, tsb_update_tl1
2787	  ldxa	[%g0]ASI_IMMU, %g2
2788	ldxa	[%g0]ASI_DMMU, %g2
2789#endif
2790tsb_update_tl1:
2791	srlx	%g2, TTARGET_CTX_SHIFT, %g7
2792	brz,pn	%g7, tsb_kernel
2793#ifdef sun4v
2794	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
2795#else
2796	  srlx	%g3, TTE_SZ_SHFT, %g7
2797#endif
2798
2799tsb_user:
2800#ifdef sun4v
2801	cmp	%g7, TTE4M
2802	bge,pn	%icc, tsb_user4m
2803	  nop
2804#else
2805	cmp	%g7, TTESZ_VALID | TTE4M
2806	be,pn	%icc, tsb_user4m
2807	  srlx	%g3, TTE_SZ2_SHFT, %g7
2808	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
2809	bnz,a,pn %icc, tsb_user_pn_synth
2810	  cmp	%g5, FAST_IMMU_MISS_TT
2811#endif
2812
2813tsb_user8k:
2814	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = first TSB ptr
2815
2816#ifndef sun4v
2817	mov	ASI_N, %g7	! user TSBs always accessed by VA
2818	mov	%g7, %asi
2819#endif /* sun4v */
2820
2821	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 5)
2822
2823#ifdef sun4v
2824	cmp	%g5, T_INSTR_MMU_MISS
2825	be,a,pn	%xcc, 9f
2826	  mov	%g3, %g5
2827#endif /* sun4v */
2828	cmp	%g5, FAST_IMMU_MISS_TT
2829	be,pn	%xcc, 9f
2830	  mov	%g3, %g5
2831
2832	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2833	! trapstat wants TTE in %g5
2834	retry
28359:
2836	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2837	! trapstat wants TTE in %g5
2838	retry
2839
2840tsb_user4m:
2841	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 = tsbp */
28424:
2843	brz,pn	%g1, 5f	/* Check to see if we have 2nd TSB programmed */
2844	  nop
2845
2846#ifndef sun4v
2847        mov     ASI_N, %g7      ! user TSBs always accessed by VA
2848        mov     %g7, %asi
2849#endif
2850
2851        TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 6)
2852
28535:
2854#ifdef sun4v
2855        cmp     %g5, T_INSTR_MMU_MISS
2856        be,a,pn %xcc, 9f
2857          mov   %g3, %g5
2858#endif /* sun4v */
2859        cmp     %g5, FAST_IMMU_MISS_TT
2860        be,pn   %xcc, 9f
2861        mov     %g3, %g5
2862
2863        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2864        ! trapstat wants TTE in %g5
2865        retry
28669:
2867        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2868        ! trapstat wants TTE in %g5
2869        retry
2870
2871#ifndef sun4v
2872	/*
2873	 * Panther ITLB synthesis.
2874	 * The Panther 32M and 256M ITLB code simulates these two large page
2875 * sizes with 4M pages, to provide support for programs (for example,
2876 * Java) that may copy instructions into a 32M or 256M data page and
2877	 * then execute them. The code below generates the 4M pfn bits and
2878	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
2879	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
2880	 * are ignored by the hardware.
2881	 *
2882	 * Now, load into TSB/TLB.  At this point:
2883	 * g2 = tagtarget
2884	 * g3 = tte
2885	 * g4 = patte
2886	 * g5 = tt
2887	 * g6 = tsbmiss area
2888	 */
2889tsb_user_pn_synth:
2890	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
2891	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
2892	bz,pn %icc, 4b				/* if not, been here before */
2893	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
2894	brz,a,pn %g1, 5f			/* no 2nd tsb */
2895	  mov	%g3, %g5
2896
2897	mov	MMU_TAG_ACCESS, %g7
2898	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
2899	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
2900
2901	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
2902	mov	%g7, %asi
2903	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 4) /* update TSB */
29045:
2905        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2906        retry
2907
2908tsb_user_itlb_synth:
2909	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 = tsbp */
2910
2911	mov	MMU_TAG_ACCESS, %g7
2912	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
2913	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
2914	brz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
2915	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
2916
2917	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
2918	mov	%g7, %asi
2919	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 6) /* update TSB */
29207:
2921	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
2922        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2923        retry
2924#endif
2925
2926tsb_kernel:
2927#ifdef sun4v
2928	cmp	%g7, TTE4M
2929	bge,pn	%icc, 5f
2930#else
2931	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
2932	be,pn	%icc, 5f
2933#endif
2934	  nop
2935	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8k tsbptr
2936	ba,pt	%xcc, 6f
2937	  nop
29385:
2939	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4m tsbptr
2940	brz,pn	%g1, 3f		/* skip programming if 4m TSB ptr is NULL */
2941	  nop
29426:
2943#ifndef sun4v
2944tsb_kernel_patch_asi:
2945	or	%g0, RUNTIME_PATCH, %g6
2946	mov	%g6, %asi	! XXX avoid writing to %asi !!
2947#endif
2948	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 7)
29493:
2950#ifdef sun4v
2951	cmp	%g5, T_INSTR_MMU_MISS
2952	be,a,pn	%icc, 1f
2953	  mov	%g3, %g5			! trapstat wants TTE in %g5
2954#endif /* sun4v */
2955	cmp	%g5, FAST_IMMU_MISS_TT
2956	be,pn	%icc, 1f
2957	  mov	%g3, %g5			! trapstat wants TTE in %g5
2958	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2959	! trapstat wants TTE in %g5
2960	retry
29611:
2962	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2963	! trapstat wants TTE in %g5
2964	retry
2965
2966tsb_ism:
2967	/*
2968	 * This is an ISM [i|d]tlb miss.  We optimize for largest
2969	 * page size down to smallest.
2970	 *
2971	 * g2 = vaddr + ctx	aka tag access register
2972	 * g3 = ismmap->ism_seg
2973	 * g4 = physical address of ismmap->ism_sfmmu
2974	 * g6 = tsbmiss area
2975	 */
2976	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
2977	brz,a,pn %g7, ptl1_panic		/* if zero jmp ahead */
2978	  mov	PTL1_BAD_ISM, %g1
2979						/* g5 = pa of imap_vb_shift */
2980	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
2981	lduha	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
2982	srlx	%g3, %g4, %g3			/* clr size field */
2983	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
2984	sllx	%g3, %g4, %g3			/* g3 = ism vbase */
2985	and	%g2, %g1, %g4			/* g4 = ctx number */
2986	andn	%g2, %g1, %g1			/* g1 = tlb miss vaddr */
2987	sub	%g1, %g3, %g2			/* g2 = offset in ISM seg */
2988	or	%g2, %g4, %g2			/* g2 = tagacc (vaddr + ctx) */
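	/*
	 * In C terms, the ISM-relative tag access just formed is roughly:
	 *
	 *	vbase  = (ism_seg >> vb_shift) << vb_shift;
	 *	offset = (tagacc & ~TAGACC_CTX_MASK) - vbase;
	 *	tagacc = offset | (tagacc & TAGACC_CTX_MASK);
	 *
	 * i.e. the fault address is rebased to an offset within the ISM
	 * segment, with the original ctx bits preserved.
	 */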
2989
2990	/*
2991	 * ISM pages are always locked down.
2992	 * If we can't find the tte then pagefault
2993 * and let the spt segment driver resolve it.
2994	 *
2995	 * g2 = ISM vaddr (offset in ISM seg)
2996	 * g6 = tsb miss area
2997	 * g7 = ISM hatid
2998	 */
2999	sub	%g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5
3000	lduha	[%g5]ASI_MEM, %g4		/* g5 = pa of imap_hatflags */
3001	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
3002	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3003	  nop
3004
3005tsb_ism_32M:
3006	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3007	brz,pn	%g5, tsb_ism_256M
3008	  nop
3009
3010	/*
3011	 * 32M hash.
3012	 */
3013
3014	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT32M,
3015	    TTE32M, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3016	    tsb_ism_4M)
3017	/* NOT REACHED */
3018
3019tsb_ism_32M_found:
3020	brlz,pt %g3, tsb_validtte
3021	  nop
3022	ba,pt	%xcc, tsb_ism_4M
3023	  nop
3024
3025tsb_ism_256M:
3026	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3027	brz,a,pn %g5, ptl1_panic
3028	  mov	PTL1_BAD_ISM, %g1
3029
3030	/*
3031	 * 256M hash.
3032	 */
3033	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT256M,
3034	    TTE256M, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3035	    tsb_ism_4M)
3036
3037tsb_ism_256M_found:
3038	brlz,pt %g3, tsb_validtte
3039	  nop
3040
3041tsb_ism_4M:
3042	/*
3043	 * 4M hash.
3044	 */
3045	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT4M,
3046	    TTE4M, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3047	    tsb_ism_8K)
3048	/* NOT REACHED */
3049
3050tsb_ism_4M_found:
3051	brlz,pt %g3, tsb_validtte
3052	  nop
3053
3054tsb_ism_8K:
3055	/*
3056	 * 8K and 64K hash.
3057	 */
3058
3059	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT64K,
3060	    TTE64K, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3061	    tsb_pagefault)
3062	/* NOT REACHED */
3063
3064tsb_ism_8K_found:
3065	brlz,pt	%g3, tsb_validtte
3066	  nop
3067
3068tsb_pagefault:
3069	rdpr	%tt, %g7
3070	cmp	%g7, FAST_PROT_TT
3071	be,a,pn	%icc, tsb_protfault
3072	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3073
3074tsb_protfault:
3075	/*
3076	 * we get here if we couldn't find a valid tte in the hash.
3077	 *
3078	 * If user and we are at tl>1 we go to window handling code.
3079	 *
3080	 * If kernel and the fault is on the same page as our stack
3081	 * pointer, then we know the stack is bad and the trap handler
3082	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3083	 *
3084	 * If this is a kernel trap and tl>1, panic.
3085	 *
3086	 * Otherwise we call pagefault.
3087	 */
3088	cmp	%g7, FAST_IMMU_MISS_TT
3089#ifdef sun4v
3090	MMU_FAULT_STATUS_AREA(%g4)
3091	ldx	[%g4 + MMFSA_I_CTX], %g5
3092	ldx	[%g4 + MMFSA_D_CTX], %g4
3093	move	%icc, %g5, %g4
3094	cmp	%g7, T_INSTR_MMU_MISS
3095	move	%icc, %g5, %g4
3096#else
3097	mov	MMU_TAG_ACCESS, %g4
3098	ldxa	[%g4]ASI_DMMU, %g2
3099	ldxa	[%g4]ASI_IMMU, %g5
3100	move	%icc, %g5, %g2
3101	cmp	%g7, T_INSTR_MMU_MISS
3102	move	%icc, %g5, %g2
3103	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
3104#endif
3105	brnz,pn	%g4, 3f				/* skip if not kernel */
3106	  rdpr	%tl, %g5
3107
3108	add	%sp, STACK_BIAS, %g3
3109	srlx	%g3, MMU_PAGESHIFT, %g3
3110	srlx	%g2, MMU_PAGESHIFT, %g4
3111	cmp	%g3, %g4
3112	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
3113	  mov	PTL1_BAD_STACK, %g1
3114
3115	cmp	%g5, 1
3116	ble,pt	%icc, 2f
3117	  nop
3118	TSTAT_CHECK_TL1(2f, %g1, %g2)
3119	rdpr	%tt, %g2
3120	cmp	%g2, FAST_PROT_TT
3121	mov	PTL1_BAD_KPROT_FAULT, %g1
3122	movne	%icc, PTL1_BAD_KMISS, %g1
3123	ba,pt	%icc, ptl1_panic
3124	  nop
3125
31262:
3127	/*
3128	 * We are taking a pagefault in the kernel on a kernel address.  If
3129	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
3130	 * want to call sfmmu_pagefault -- we will instead note that a fault
3131	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
3132	 * (instead of a "retry").  This will step over the faulting
3133	 * instruction.
3134	 */
3135	CPU_INDEX(%g1, %g2)
3136	set	cpu_core, %g2
3137	sllx	%g1, CPU_CORE_SHIFT, %g1
3138	add	%g1, %g2, %g1
3139	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3140	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3141	bz	sfmmu_pagefault
3142	or	%g2, CPU_DTRACE_BADADDR, %g2
3143	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3144	GET_MMU_D_ADDR(%g3, %g4)
3145	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3146	done
3147
31483:
3149	cmp	%g5, 1
3150	ble,pt	%icc, 4f
3151	  nop
3152	TSTAT_CHECK_TL1(4f, %g1, %g2)
3153	ba,pt	%icc, sfmmu_window_trap
3154	  nop
3155
31564:
3157	/*
3158	 * We are taking a pagefault on a non-kernel address.  If we are in
3159	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3160	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3161	 */
3162	CPU_INDEX(%g1, %g2)
3163	set	cpu_core, %g2
3164	sllx	%g1, CPU_CORE_SHIFT, %g1
3165	add	%g1, %g2, %g1
3166	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3167	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3168	bz	sfmmu_pagefault
3169	or	%g2, CPU_DTRACE_BADADDR, %g2
3170	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3171	GET_MMU_D_ADDR(%g3, %g4)
3172	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3173
3174	/*
3175	 * Be sure that we're actually taking this miss from the kernel --
3176	 * otherwise we have managed to return to user-level with
3177	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3178	 */
3179	rdpr	%tstate, %g2
3180	btst	TSTATE_PRIV, %g2
3181	bz,a	ptl1_panic
3182	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3183	done
3184
3185	ALTENTRY(tsb_tl0_noctxt)
3186	/*
3187	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
3188 * if it is, indicate that we have faulted and issue a done.
3189	 */
3190	CPU_INDEX(%g5, %g6)
3191	set	cpu_core, %g6
3192	sllx	%g5, CPU_CORE_SHIFT, %g5
3193	add	%g5, %g6, %g5
3194	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
3195	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
3196	bz	1f
3197	or	%g6, CPU_DTRACE_BADADDR, %g6
3198	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
3199	GET_MMU_D_ADDR(%g3, %g4)
3200	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
3201
3202	/*
3203	 * Be sure that we're actually taking this miss from the kernel --
3204	 * otherwise we have managed to return to user-level with
3205	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3206	 */
3207	rdpr	%tstate, %g5
3208	btst	TSTATE_PRIV, %g5
3209	bz,a	ptl1_panic
3210	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3211	done
3212
32131:
3214	rdpr	%tt, %g5
3215	cmp	%g5, FAST_IMMU_MISS_TT
3216#ifdef sun4v
3217	MMU_FAULT_STATUS_AREA(%g2)
3218	be,a,pt	%icc, 2f
3219	  ldx	[%g2 + MMFSA_I_CTX], %g3
3220	cmp	%g5, T_INSTR_MMU_MISS
3221	be,a,pt	%icc, 2f
3222	  ldx	[%g2 + MMFSA_I_CTX], %g3
3223	ldx	[%g2 + MMFSA_D_CTX], %g3
32242:
3225#else
3226	mov	MMU_TAG_ACCESS, %g2
3227	be,a,pt	%icc, 2f
3228	  ldxa	[%g2]ASI_IMMU, %g3
3229	ldxa	[%g2]ASI_DMMU, %g3
32302:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
3231#endif
3232	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
3233	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
3234	rdpr	%tl, %g5
3235	cmp	%g5, 1
3236	ble,pt	%icc, sfmmu_mmu_trap
3237	  nop
3238	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3239	ba,pt	%icc, sfmmu_window_trap
3240	  nop
3241	SET_SIZE(sfmmu_tsb_miss)
3242
3243#if (1<< TSBMISS_SHIFT) != TSBMISS_SIZE
3244#error - TSBMISS_SHIFT does not correspond to size of tsbmiss struct
3245#endif
3246
3247#endif /* lint */
3248
3249#if defined (lint)
3250/*
3251 * This routine will look for a user or kernel vaddr in the hash
3252 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
3253 * grab any locks.  It should only be used by other sfmmu routines.
3254 */
3255/* ARGSUSED */
3256pfn_t
3257sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3258{
3259	return(0);
3260}
3261
3262#else /* lint */
3263
3264	ENTRY_NP(sfmmu_vatopfn)
3265 	/*
3266 	 * disable interrupts
3267 	 */
3268 	rdpr	%pstate, %o3
3269#ifdef DEBUG
3270	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
3271	bnz,pt	%icc, 1f			/* disabled, panic	 */
3272	  nop
3273
3274	sethi	%hi(panicstr), %g1
3275	ldx	[%g1 + %lo(panicstr)], %g1
3276	tst	%g1
3277	bnz,pt	%icc, 1f
3278	  nop
3279
3280	save	%sp, -SA(MINFRAME), %sp
3281	sethi	%hi(sfmmu_panic1), %o0
3282	call	panic
3283	 or	%o0, %lo(sfmmu_panic1), %o0
32841:
3285#endif
3286	/*
3287	 * disable interrupts to protect the TSBMISS area
3288	 */
3289	andn    %o3, PSTATE_IE, %o5
3290	wrpr    %o5, 0, %pstate
3291
3292	/*
3293	 * o0 = vaddr
3294	 * o1 = sfmmup
3295	 * o2 = ttep
3296	 */
3297	CPU_TSBMISS_AREA(%g1, %o5)
3298	ldn	[%g1 + TSBMISS_KHATID], %o4
3299	cmp	%o4, %o1
3300	bne,pn	%ncc, vatopfn_nokernel
3301	  mov	TTE64K, %g5			/* g5 = rehash # */
3302	mov %g1,%o5				/* o5 = tsbmiss_area */
3303	/*
3304	 * o0 = vaddr
3305	 * o1 & o4 = hatid
3306	 * o2 = ttep
3307	 * o5 = tsbmiss area
3308	 */
3309	mov	HBLK_RANGE_SHIFT, %g6
33101:
3311
3312	/*
3313	 * o0 = vaddr
3314	 * o1 = sfmmup
3315	 * o2 = ttep
3316	 * o3 = old %pstate
3317	 * o4 = hatid
3318	 * o5 = tsbmiss
3319	 * g5 = rehash #
3320	 * g6 = hmeshift
3321	 *
3322 * The first arg to GET_TTE is actually the tag access register,
3323 * not just vaddr. Since this call is for the kernel we need to clear
3324	 * any lower vaddr bits that would be interpreted as ctx bits.
3325	 */
3326	set     TAGACC_CTX_MASK, %g1
3327	andn    %o0, %g1, %o0
3328	GET_TTE(%o0, %o4, %g1, %g2, %g3, %o5, %g4, %g6, %g5,
3329		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
3330
3331kvtop_hblk_found:
3332	/*
3333	 * o0 = vaddr
3334	 * o1 = sfmmup
3335	 * o2 = ttep
3336	 * g1 = tte
3337	 * g2 = tte pa
3338	 * g3 = tte va
3339	 * o2 = tsbmiss area
3340	 * o1 = hat id
3341	 */
3342	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
3343	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3344	stx %g1,[%o2]				/* put tte into *ttep */
3345	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
3346	/*
3347	 * o0 = vaddr
3348	 * o1 = sfmmup
3349	 * o2 = ttep
3350	 * g1 = pfn
3351	 */
3352	ba,pt	%xcc, 6f
3353	  mov	%g1, %o0
3354
3355kvtop_nohblk:
3356	/*
3357 * we get here if we couldn't find a valid hblk in the hash.  We rehash
3358 * if necessary.
3359	 */
3360	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
3361#ifdef sun4v
3362	cmp	%g5, MAX_HASHCNT
3363#else
3364	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
3365#endif
3366	be,a,pn	%icc, 6f
3367	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3368	mov	%o1, %o4			/* restore hatid */
3369#ifdef sun4v
3370        add	%g5, 2, %g5
3371	cmp	%g5, 3
3372	move	%icc, MMU_PAGESHIFT4M, %g6
3373	ba,pt	%icc, 1b
3374	movne	%icc, MMU_PAGESHIFT256M, %g6
3375#else
3376        inc	%g5
3377	cmp	%g5, 2
3378	move	%icc, MMU_PAGESHIFT512K, %g6
3379	ba,pt	%icc, 1b
3380	movne	%icc, MMU_PAGESHIFT4M, %g6
3381#endif
33826:
3383	retl
3384 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3385
3386tsb_suspend:
3387	/*
3388	 * o0 = vaddr
3389	 * o1 = sfmmup
3390	 * o2 = ttep
3391	 * g1 = tte
3392	 * g2 = tte pa
3393	 * g3 = tte va
3394 * o5 = tsbmiss area (use o5, not o2; o2 still holds ttep)
3395	 */
3396	stx %g1,[%o2]				/* put tte into *ttep */
3397	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
3398	  sub	%g0, 1, %o0			/* output = -1 (PFN_INVALID) */
3399	TTETOPFN(%g1, %o0, vatopfn_l3, %g2, %g3, %g4)
3400	/*
3401	 * o0 = PFN return value PFN_INVALID, PFN_SUSPENDED, or pfn#
3402	 * o1 = sfmmup
3403	 * o2 = ttep
3404	 * g1 = pfn
3405	 */
3406	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
34078:
3408	retl
3409	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
3410
3411vatopfn_nokernel:
3412	/*
3413 * This routine does NOT support user addresses.
3414 * There is a C routine that does.
3415 * The only reason the C routine does not support
3416 * kernel addresses as well is that we do va_to_pa
3417 * while holding the hashlock.
3418	 */
3419 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3420	save	%sp, -SA(MINFRAME), %sp
3421	sethi	%hi(sfmmu_panic3), %o0
3422	call	panic
3423	 or	%o0, %lo(sfmmu_panic3), %o0
3424
3425	SET_SIZE(sfmmu_vatopfn)
3426#endif /* lint */
3427
3428
3429
3430#if !defined(lint)
3431
3432/*
3433 * kpm lock used between trap level tsbmiss handler and kpm C level.
3434 */
3435#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
3436	mov     0xff, tmp1						;\
3437label1:									;\
3438	casa    [kpmlckp]asi, %g0, tmp1					;\
3439	brnz,pn tmp1, label1						;\
3440	mov     0xff, tmp1						;\
3441	membar  #LoadLoad
3442
3443#define KPMLOCK_EXIT(kpmlckp, asi)					\
3444	membar  #LoadStore|#StoreStore					;\
3445	sta     %g0, [kpmlckp]asi
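/*
 * Sketch of the kpm lock in C-like terms: a simple spin lock on a 32-bit
 * word, acquired with compare-and-swap through the given ASI:
 *
 *	KPMLOCK_ENTER:	while (cas(kpmlckp, 0, 0xff) != 0)
 *				continue;
 *			membar #LoadLoad;
 *
 *	KPMLOCK_EXIT:	membar #LoadStore|#StoreStore;
 *			store 0 to kpmlckp;
 */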
3446
3447/*
3448 * Lookup a memseg for a given pfn and if found, return the physical
3449 * address of the corresponding struct memseg in mseg, otherwise
3450 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
3451 * tsbmp, %asi is assumed to be ASI_MEM.
3452 * This lookup is done by strictly traversing only the physical memseg
3453 * linkage. The more generic approach, to check the virtual linkage
3454 * before using the physical (used e.g. with hmehash buckets), cannot
3455 * be used here. Memory DR operations can run in parallel to this
3456 * lookup without any locks, and updates of the physical and virtual
3457 * linkage cannot be done atomically with respect to each other. Because
3458 * physical address zero can be a valid physical address, MSEG_NULLPTR_PA
3459 * acts as the "physical NULL" pointer.
3460 */
3461#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
3462	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
3463	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
3464	udivx	pfn, mseg, mseg						;\
3465	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
3466	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
3467	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
3468	add	tmp1, mseg, tmp1					;\
3469	ldxa	[tmp1]%asi, mseg					;\
3470	cmp	mseg, MSEG_NULLPTR_PA					;\
3471	be,pn	%xcc, label/**/1		/* if not found */	;\
3472	  nop								;\
3473	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3474	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3475	blu,pn	%xcc, label/**/1					;\
3476	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3477	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3478	bgeu,pn	%xcc, label/**/1					;\
3479	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
3480	mulx	tmp1, PAGE_SIZE, tmp1					;\
3481	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
3482	add	tmp2, tmp1, tmp1			/* pp */	;\
3483	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
3484	cmp	tmp2, pfn						;\
3485	be,pt	%xcc, label/**/_ok			/* found */	;\
3486label/**/1:								;\
3487	/* brute force lookup */					;\
3488	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
3489	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
3490label/**/2:								;\
3491	cmp	mseg, MSEG_NULLPTR_PA					;\
3492	be,pn	%xcc, label/**/_ok		/* if not found */	;\
3493	  nop								;\
3494	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3495	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3496	blu,a,pt %xcc, label/**/2					;\
3497	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3498	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3499	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3500	bgeu,a,pt %xcc, label/**/2					;\
3501	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3502label/**/_ok:
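/*
 * Rough C equivalent of the lookup above (field names only approximate
 * the MEMSEG_ offsets; the hash table itself is reached through the
 * kpmtsbm area, and all loads go through %asi == ASI_MEM):
 *
 *	mseg = msegphash[(pfn / mhash_per_slot) & (SFMMU_N_MEM_SLOTS - 1)];
 *	if (mseg != MSEG_NULLPTR_PA &&
 *	    pfn >= mseg->pages_base && pfn < mseg->pages_end &&
 *	    mseg->pages[pfn - mseg->pages_base].p_pagenum == pfn)
 *		return (mseg);			(hash hit)
 *
 *	for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA; mseg = mseg->nextpa)
 *		if (pfn >= mseg->pages_base && pfn < mseg->pages_end)
 *			return (mseg);		(brute force walk)
 *	return (MSEG_NULLPTR_PA);
 */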
3503
3504	/*
3505	 * kpm tsb miss handler large pages
3506	 * g1 = 8K kpm TSB entry pointer
3507	 * g2 = tag access register
3508	 * g3 = 4M kpm TSB entry pointer
3509	 */
3510	ALTENTRY(sfmmu_kpm_dtsb_miss)
3511	TT_TRACE(trace_tsbmiss)
3512
3513	CPU_INDEX(%g7, %g6)
3514	sethi	%hi(kpmtsbm_area), %g6
3515	sllx	%g7, KPMTSBM_SHIFT, %g7
3516	or	%g6, %lo(kpmtsbm_area), %g6
3517	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
3518
3519	/* check enable flag */
3520	ldub	[%g6 + KPMTSBM_FLAGS], %g4
3521	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
3522	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
3523	  nop
3524
3525	/* VA range check */
3526	ldx	[%g6 + KPMTSBM_VBASE], %g7
3527	cmp	%g2, %g7
3528	blu,pn	%xcc, sfmmu_tsb_miss
3529	  ldx	[%g6 + KPMTSBM_VEND], %g5
3530	cmp	%g2, %g5
3531	bgeu,pn	%xcc, sfmmu_tsb_miss
3532	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
3533
3534	/*
3535	 * check TL tsbmiss handling flag
3536	 * bump tsbmiss counter
3537	 */
3538	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
3539#ifdef	DEBUG
3540	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
3541	inc	%g5
3542	brz,pn	%g3, sfmmu_kpm_exception
3543	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
3544#else
3545	inc	%g5
3546	st	%g5, [%g6 + KPMTSBM_TSBMISS]
3547#endif
3548	/*
3549	 * At this point:
3550	 *  g1 = 8K kpm TSB pointer (not used)
3551	 *  g2 = tag access register
3552	 *  g3 = clobbered
3553	 *  g6 = per-CPU kpm tsbmiss area
3554	 *  g7 = kpm_vbase
3555	 */
3556
3557	/* vaddr2pfn */
3558	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
3559	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
3560	srax    %g4, %g3, %g2			/* which alias range (r) */
3561	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
3562	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
3563
3564	/*
3565	 * Setup %asi
3566	 * mseg_pa = page_numtomemseg_nolock(pfn)
3567	 * if (mseg_pa == NULL) sfmmu_kpm_exception
3568	 * g2=pfn
3569	 */
3570	mov	ASI_MEM, %asi
3571	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
3572	cmp	%g3, MSEG_NULLPTR_PA
3573	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
3574	  nop
3575
3576	/*
3577	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
3578	 * g2=pfn g3=mseg_pa
3579	 */
3580	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
3581	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
3582	srlx	%g2, %g5, %g4
3583	sllx	%g4, %g5, %g4
3584	sub	%g4, %g7, %g4
3585	srlx	%g4, %g5, %g4
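	/*
	 * i.e., with s = kpmp2pshift:
	 *
	 *	inx = (((pfn >> s) << s) - mseg_pa->kpm_pbase) >> s;
	 *
	 * which is the ptokpmp(kpmptop(ptopkpmp(pfn)) - kpm_pbase) from
	 * the comment above, written out with plain shifts.
	 */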
3586
3587	/*
3588	 * Validate inx value
3589	 * g2=pfn g3=mseg_pa g4=inx
3590	 */
3591#ifdef	DEBUG
3592	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
3593	cmp	%g4, %g5			/* inx - nkpmpgs */
3594	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
3595	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3596#else
3597	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3598#endif
3599	/*
3600	 * kp = &mseg_pa->kpm_pages[inx]
3601	 */
3602	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
3603	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
3604	add	%g5, %g4, %g5			/* kp */
3605
3606	/*
3607	 * KPMP_HASH(kp)
3608	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
3609	 */
3610	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
3611	sub	%g7, 1, %g7			/* mask */
3612	srlx	%g5, %g1, %g1			/* x = ksp >> kpmp_shift */
3613	add	%g5, %g1, %g5			/* y = ksp + x */
3614	and 	%g5, %g7, %g5			/* hashinx = y & mask */
3615
3616	/*
3617	 * Calculate physical kpm_page pointer
3618	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
3619	 */
3620	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
3621	add	%g1, %g4, %g1			/* kp_pa */
3622
3623	/*
3624	 * Calculate physical hash lock address
3625	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
3626	 */
3627	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
3628	sllx	%g5, KPMHLK_SHIFT, %g5
3629	add	%g4, %g5, %g3
3630	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
3631
3632	/*
3633	 * Assemble tte
3634	 * g1=kp_pa g2=pfn g3=hlck_pa
3635	 */
3636#ifdef sun4v
3637	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
3638	sllx	%g5, 32, %g5
3639	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
3640	or	%g4, TTE4M, %g4
3641	or	%g5, %g4, %g5
3642#else
3643	sethi	%hi(TTE_VALID_INT), %g4
3644	mov	TTE4M, %g5
3645	sllx	%g5, TTE_SZ_SHFT_INT, %g5
3646	or	%g5, %g4, %g5			/* upper part */
3647	sllx	%g5, 32, %g5
3648	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
3649	or	%g5, %g4, %g5
3650#endif
3651	sllx	%g2, MMU_PAGESHIFT, %g4
3652	or	%g5, %g4, %g5			/* tte */
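	/*
	 * Illustrative C sketch of the assembled 4M kpm TTE (not compiled):
	 *
	 *	sun4v:	tte = ((uint64_t)TTE_VALID_INT << 32) |
	 *		    TTE_CP_INT | TTE_CV_INT | TTE_PRIV_INT |
	 *		    TTE_HWWR_INT | TTE4M |
	 *		    ((uint64_t)pfn << MMU_PAGESHIFT);
	 *	sun4u:	tte = ((uint64_t)(TTE_VALID_INT |
	 *		    (TTE4M << TTE_SZ_SHFT_INT)) << 32) |
	 *		    TTE_CP_INT | TTE_CV_INT | TTE_PRIV_INT |
	 *		    TTE_HWWR_INT |
	 *		    ((uint64_t)pfn << MMU_PAGESHIFT);
	 */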
3653	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
3654	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3655
3656	/*
3657	 * tsb dropin
3658	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
3659	 */
3660
3661	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
3662	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
3663
3664	/* use C-handler if there's no go for dropin */
3665	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
3666	cmp	%g7, -1
3667	bne,pn	%xcc, 5f	/* use C-handler if there's no go for dropin */
3668	  nop
3669
3670#ifdef	DEBUG
3671	/* double check refcnt */
3672	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
3673	brz,pn	%g7, 5f			/* let C-handler deal with this */
3674	  nop
3675#endif
3676
3677#ifndef sun4v
3678	ldub	[%g6 + KPMTSBM_FLAGS], %g7
3679	mov	ASI_N, %g1
3680	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
3681	movnz	%icc, ASI_MEM, %g1
3682	mov	%g1, %asi
3683#endif
3684
3685	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
3686	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
3687
3688	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
3689	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
3690
3691	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
3692
3693	/* KPMLOCK_EXIT(kpmlckp, asi) */
3694	KPMLOCK_EXIT(%g3, ASI_MEM)
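	/*
	 * Outline of the dropin just performed (illustrative pseudo-C;
	 * the lowercase names stand for the KPMLOCK_ENTER, TSB_LOCK_ENTRY,
	 * TSB_INSERT_UNLOCK_ENTRY, DTLB_STUFF and KPMLOCK_EXIT macros):
	 *
	 *	kpmlock_enter(hlck_pa);
	 *	if (kp->kp_refcntc != -1) {
	 *		kpmlock_exit(hlck_pa);
	 *		goto sfmmu_kpm_exception;	(label 5)
	 *	}
	 *	tsb_lock_entry(tsbp);
	 *	tsb_insert_unlock_entry(tsbp, tte, ttarget);
	 *	dtlb_stuff(tte);
	 *	kpmlock_exit(hlck_pa);
	 */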
3695
3696	/*
3697	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3698	 * point to trapstat's TSB miss return code (note that trapstat
3699	 * itself will patch the correct offset to add).
3700	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
3701	 */
3702	rdpr	%tl, %g7
3703	cmp	%g7, 1
3704	ble	%icc, 0f
3705	sethi	%hi(KERNELBASE), %g6
3706	rdpr	%tpc, %g7
3707	or	%g6, %lo(KERNELBASE), %g6
3708	cmp	%g7, %g6
3709	bgeu	%xcc, 0f
3710	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
3711	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3712	wrpr	%g7, %tpc
3713	add	%g7, 4, %g7
3714	wrpr	%g7, %tnpc
37150:
3716	retry
37175:
3718	/* g3=hlck_pa */
3719	KPMLOCK_EXIT(%g3, ASI_MEM)
3720	ba,pt	%icc, sfmmu_kpm_exception
3721	  nop
3722	SET_SIZE(sfmmu_kpm_dtsb_miss)
3723
3724	/*
3725	 * kpm tsbmiss handler for smallpages
3726	 * g1 = 8K kpm TSB pointer
3727	 * g2 = tag access register
3728	 * g3 = 4M kpm TSB pointer
3729	 */
3730	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
3731	TT_TRACE(trace_tsbmiss)
3732	CPU_INDEX(%g7, %g6)
3733	sethi	%hi(kpmtsbm_area), %g6
3734	sllx	%g7, KPMTSBM_SHIFT, %g7
3735	or	%g6, %lo(kpmtsbm_area), %g6
3736	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
3737
3738	/* check enable flag */
3739	ldub	[%g6 + KPMTSBM_FLAGS], %g4
3740	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
3741	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
3742	  nop
3743
3744	/*
3745	 * VA range check
3746	 * On fail: goto sfmmu_tsb_miss
3747	 */
3748	ldx	[%g6 + KPMTSBM_VBASE], %g7
3749	cmp	%g2, %g7
3750	blu,pn	%xcc, sfmmu_tsb_miss
3751	  ldx	[%g6 + KPMTSBM_VEND], %g5
3752	cmp	%g2, %g5
3753	bgeu,pn	%xcc, sfmmu_tsb_miss
3754	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
3755
3756	/*
3757	 * check TL tsbmiss handling flag
3758	 * bump tsbmiss counter
3759	 */
3760	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
3761#ifdef	DEBUG
3762	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
3763	inc	%g5
3764	brz,pn	%g1, sfmmu_kpm_exception
3765	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
3766#else
3767	inc	%g5
3768	st	%g5, [%g6 + KPMTSBM_TSBMISS]
3769#endif
3770	/*
3771	 * At this point:
3772	 *  g1 = clobbered
3773	 *  g2 = tag access register
3774	 *  g3 = 4M kpm TSB pointer (not used)
3775	 *  g6 = per-CPU kpm tsbmiss area
3776	 *  g7 = kpm_vbase
3777	 */
3778
3779	/* vaddr2pfn */
3780	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
3781	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
3782	srax    %g4, %g3, %g2			/* which alias range (r) */
3783	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
3784	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
3785
3786	/*
3787	 * Setup %asi
3788	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
3789	 * if (mseg not found) sfmmu_kpm_exception
3790	 * g2=pfn
3791	 */
3792	mov	ASI_MEM, %asi
3793	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
3794	cmp	%g3, MSEG_NULLPTR_PA
3795	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
3796	  nop
3797
3798	/*
3799	 * inx = pfn - mseg_pa->kpm_pbase
3800	 * g2=pfn g3=mseg_pa
3801	 */
3802	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
3803	sub	%g2, %g7, %g4
3804
3805#ifdef	DEBUG
3806	/*
3807	 * Validate inx value
3808	 * g2=pfn g3=mseg_pa g4=inx
3809	 */
3810	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
3811	cmp	%g4, %g5			/* inx - nkpmpgs */
3812	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
3813	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3814#else
3815	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3816#endif
3817	/* ksp = &mseg_pa->kpm_spages[inx] */
3818	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
3819	add	%g5, %g4, %g5			/* ksp */
3820
3821	/*
3822	 * KPMP_SHASH(kp)
3823	 * g2=pfn g3=mseg_pa g4=inx g5=ksp g7=kpmp_stable_sz
3824	 */
3825	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
3826	sub	%g7, 1, %g7			/* mask */
3827	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
3828	add	%g5, %g1, %g5			/* y = ksp + x */
3829	and 	%g5, %g7, %g5			/* hashinx = y & mask */
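	/*
	 * Illustrative C sketch of KPMP_SHASH above (not compiled); note
	 * it shifts left where KPMP_HASH shifts right.  "kpmp_shift" and
	 * "kpmp_stable_sz" name the values loaded from KPMTSBM_KPMPSHIFT
	 * and KPMTSBM_KPMPTABLESZ:
	 *
	 *	hashinx = ((uintptr_t)ksp + ((uintptr_t)ksp << kpmp_shift)) &
	 *	    (kpmp_stable_sz - 1);
	 */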
3830
3831	/*
3832	 * Calculate physical kpm_spage pointer
3833	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
3834	 */
3835	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
3836	add	%g1, %g4, %g1			/* ksp_pa */
3837
3838	/*
3839	 * Calculate physical hash lock address.
3840	 * Note: Changes in kpm_shlk_t must be reflected here.
3841	 * g1=ksp_pa g2=pfn g5=hashinx
3842	 */
3843	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
3844	sllx	%g5, KPMSHLK_SHIFT, %g5
3845	add	%g4, %g5, %g3			/* hlck_pa */
3846
3847	/*
3848	 * Assemble tte
3849	 * g1=ksp_pa g2=pfn g3=hlck_pa
3850	 */
3851	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
3852	sllx	%g5, 32, %g5
3853	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
3854	or	%g5, %g4, %g5
3855	sllx	%g2, MMU_PAGESHIFT, %g4
3856	or	%g5, %g4, %g5			/* tte */
3857	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
3858	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3859
3860	/*
3861	 * tsb dropin
3862	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte
3863	 */
3864
3865	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
3866	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
3867
3868	/* use C-handler if there's no go for dropin */
3869	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
3870	cmp	%g7, -1
3871	bne,pn	%xcc, 5f
3872	  nop
3873
3874#ifndef sun4v
3875	ldub	[%g6 + KPMTSBM_FLAGS], %g7
3876	mov	ASI_N, %g1
3877	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
3878	movnz	%icc, ASI_MEM, %g1
3879	mov	%g1, %asi
3880#endif
3881
3882	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
3883	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
3884
3885	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
3886	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
3887
3888	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
3889
3890	/* KPMLOCK_EXIT(kpmlckp, asi) */
3891	KPMLOCK_EXIT(%g3, ASI_MEM)
3892
3893	/*
3894	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3895	 * point to trapstat's TSB miss return code (note that trapstat
3896	 * itself will patch the correct offset to add).
3897	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
3898	 */
3899	rdpr	%tl, %g7
3900	cmp	%g7, 1
3901	ble	%icc, 0f
3902	sethi	%hi(KERNELBASE), %g6
3903	rdpr	%tpc, %g7
3904	or	%g6, %lo(KERNELBASE), %g6
3905	cmp	%g7, %g6
3906	bgeu	%xcc, 0f
3907	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
3908	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3909	wrpr	%g7, %tpc
3910	add	%g7, 4, %g7
3911	wrpr	%g7, %tnpc
39120:
3913	retry
39145:
3915	/* g3=hlck_pa */
3916	KPMLOCK_EXIT(%g3, ASI_MEM)
3917	ba,pt	%icc, sfmmu_kpm_exception
3918	  nop
3919	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
3920
3921#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
3922#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
3923#endif
3924
3925#endif /* lint */
3926
3927#ifdef	lint
3928/*
3929 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
3930 * Called from C-level; sets/clears the "go" indication for the trap-level handler.
3931 * khl_lock is a low-level spin lock that protects the kp_tsbmtl field.
3932 * It is assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
3933 * Assumes khl_mutex is held when called from C-level.
3934 */
3935/* ARGSUSED */
3936void
3937sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
3938{
3939}
3940
3941/*
3942 * kpm_smallpages: stores val to the byte pointed to by mapped, within
3943 * low-level lock brackets; the old value is returned.
3944 * Called from C-level.
3945 */
3946/* ARGSUSED */
3947int
3948sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
3949{
3950	return (0);
3951}
3952
3953#else /* lint */
3954
3955	.seg	".data"
3956sfmmu_kpm_tsbmtl_panic:
3957	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
3958	.byte	0
3959sfmmu_kpm_stsbmtl_panic:
3960	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
3961	.byte	0
3962	.align	4
3963	.seg	".text"
3964
3965	ENTRY_NP(sfmmu_kpm_tsbmtl)
3966	rdpr	%pstate, %o3
3967	/*
3968	 * %o0 = &kp_refcntc
3969	 * %o1 = &khl_lock
3970	 * %o2 = 0/1 (off/on)
3971	 * %o3 = pstate save
3972	 */
3973#ifdef DEBUG
3974	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
3975	bnz,pt %icc, 1f				/* disabled, panic	 */
3976	  nop
3977	save	%sp, -SA(MINFRAME), %sp
3978	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
3979	call	panic
3980	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
3981	ret
3982	restore
39831:
3984#endif /* DEBUG */
3985	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
3986
3987	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
3988	mov	-1, %o5
3989	brz,a	%o2, 2f
3990	  mov	0, %o5
39912:
3992	sth	%o5, [%o0]
3993	KPMLOCK_EXIT(%o1, ASI_N)
3994
3995	retl
3996	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
3997	SET_SIZE(sfmmu_kpm_tsbmtl)
3998
3999	ENTRY_NP(sfmmu_kpm_stsbmtl)
4000	rdpr	%pstate, %o3
4001	/*
4002	 * %o0 = &mapped
4003	 * %o1 = &kshl_lock
4004	 * %o2 = val
4005	 * %o3 = pstate save
4006	 */
4007#ifdef DEBUG
4008	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4009	bnz,pt %icc, 1f				/* disabled, panic	 */
4010	  nop
4011	save	%sp, -SA(MINFRAME), %sp
4012	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
4013	call	panic
4014	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4015	ret
4016	restore
40171:
4018#endif /* DEBUG */
4019	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4020
4021	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4022	ldsb	[%o0], %o5
4023	stb	%o2, [%o0]
4024	KPMLOCK_EXIT(%o1, ASI_N)
4025
4026	mov	%o5, %o0			/* return old val */
4027	retl
4028	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4029	SET_SIZE(sfmmu_kpm_stsbmtl)
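	/*
	 * C-level sketch of the two routines above (illustrative only; the
	 * real code runs with interrupts disabled inside KPMLOCK_ENTER/
	 * KPMLOCK_EXIT brackets):
	 *
	 *	sfmmu_kpm_tsbmtl:	*kp_refcntc = (cmd != 0) ? -1 : 0;
	 *	sfmmu_kpm_stsbmtl:	old = *mapped; *mapped = val;
	 *				return (old);
	 */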
4030
4031#endif /* lint */
4032
4033#ifndef lint
4034#ifdef sun4v
4035	/*
4036	 * User/kernel data miss w/ multiple TSBs
4037	 * The first probe covers 8K, 64K, and 512K page sizes,
4038	 * because 64K and 512K mappings are replicated off the 8K
4039	 * pointer.  The second probe covers the 4M page size only.
4040	 *
4041	 * MMU fault area contains miss address and context.
4042	 */
4043	ALTENTRY(sfmmu_slow_dmmu_miss)
4044	GET_MMU_D_TAGACC_CTX(%g2, %g3)	! %g2 = tagacc, %g3 = ctx
4045
4046slow_miss_common:
4047	/*
4048	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4049	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
4050	 */
4051	brnz,pt	%g3, 8f			! check for user context
4052	  nop
4053
4054	/*
4055	 * Kernel miss
4056	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
4057	 * branch to sfmmu_tsb_miss_tt to handle it.
4058	 */
4059	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4060sfmmu_dslow_patch_ktsb_base:
4061	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
4062sfmmu_dslow_patch_ktsb_szcode:
4063	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
4064
4065	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
4066	! %g1 = First TSB entry pointer, as TSB miss handler expects
4067
4068	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4069sfmmu_dslow_patch_ktsb4m_base:
4070	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
4071sfmmu_dslow_patch_ktsb4m_szcode:
4072	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
4073
4074	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
4075	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
4076	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4077	.empty
4078
40798:
4080	/*
4081	 * User miss
4082	 * Get first TSB pointer in %g1
4083	 * Get second TSB pointer (or NULL if no second TSB) in %g3
4084	 * Branch to sfmmu_tsb_miss_tt to handle it
4085	 */
4086	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
4087	/* %g1 = first TSB entry ptr now, %g2 preserved */
4088
4089	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
4090	brlz,a,pt %g3, sfmmu_tsb_miss_tt	/* done if no 2nd TSB */
4091	  mov	%g0, %g3
4092
4093	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
4094	/* %g3 = second TSB entry ptr now, %g2 preserved */
40959:
4096	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4097	.empty
4098	SET_SIZE(sfmmu_slow_dmmu_miss)
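	/*
	 * Outline of slow_miss_common above (illustrative):
	 *
	 *	if (ctx == 0) {			kernel miss
	 *		%g1 = ktsb entry pointer for tagacc;
	 *		%g3 = ktsb4m entry pointer for tagacc;
	 *	} else {			user miss
	 *		%g1 = 1st TSB entry pointer;
	 *		%g3 = 2nd TSB entry pointer, or NULL if none;
	 *	}
	 *	branch to sfmmu_tsb_miss_tt;
	 */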
4099
4100
4101	/*
4102	 * User/kernel instruction miss w/ multiple TSBs
4103	 * The first probe covers 8K, 64K, and 512K page sizes,
4104	 * because 64K and 512K mappings are replicated off 8K
4105	 * because 64K and 512K mappings are replicated off the 8K
4106	 * pointer.  The second probe covers the 4M page size only.
4107	 * MMU fault area contains miss address and context.
4108	 */
4109	ALTENTRY(sfmmu_slow_immu_miss)
4110	MMU_FAULT_STATUS_AREA(%g2)
4111	ldx	[%g2 + MMFSA_I_CTX], %g3
4112	ldx	[%g2 + MMFSA_I_ADDR], %g2
4113	srlx	%g2, MMU_PAGESHIFT, %g2	! align address to page boundary
4114	sllx	%g2, MMU_PAGESHIFT, %g2
4115	ba,pt	%xcc, slow_miss_common
4116	or	%g2, %g3, %g2
4117	SET_SIZE(sfmmu_slow_immu_miss)
4118
4119#endif /* sun4v */
4120#endif	/* lint */
4121
4122#ifndef lint
4123
4124/*
4125 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4126 */
4127	.seg	".data"
4128	.align	64
4129	.global tsbmiss_area
4130tsbmiss_area:
4131	.skip	(TSBMISS_SIZE * NCPU)
4132
4133	.align	64
4134	.global kpmtsbm_area
4135kpmtsbm_area:
4136	.skip	(KPMTSBM_SIZE * NCPU)
4137#endif	/* lint */
4138