/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#ifndef	lint

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */

#ifndef	lint

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBE_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc				;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
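
/*
 * Worked example (an illustrative sketch only; it assumes TSB_ENTRIES(0)
 * is 512 and TSB_ENTRY_SHIFT is 4, i.e. 16-byte TSB entries):
 *
 *	vpshift = 13 (8K pages), tagacc = 0x10002000, szc = 1
 *	virtpage = 0x10002000 >> 13	= 0x8001
 *	nentries = 512 << 1		= 1024, so mask = 0x3ff
 *	index    = 0x8001 & 0x3ff	= 0x1
 *	tsbbase += 0x1 << 4, i.e. the entry pointer is tsbbase + 0x10
 */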

/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)

/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but is really just invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	/* tsbe lock acquired */					;\
	membar	#StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]%asi, tmp1					;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]%asi, tmp1					;\
	/* tsbe lock acquired */					;\
	membar	#StoreStore

#endif /* UTSB_PHYS */
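
/*
 * In rough pseudo-C the lock sequence above is (a sketch only; cas32()
 * is hypothetical shorthand for the casa instruction, which compares
 * the 32-bit tag word against tmp1 and swaps in TSBTAG_LOCKED on a
 * match):
 *
 *	for (;;) {
 *		old = tsbe_tag_inthi;
 *		if (old == TSBTAG_LOCKED)
 *			continue;		// spin while locked
 *		if (cas32(&tsbe_tag_inthi, old, TSBTAG_LOCKED) == old)
 *			break;			// lock acquired
 *	}
 *	membar #StoreStore;
 *
 * The name tsbe_tag_inthi is illustrative; the macro relies on
 * TSBTAG_INTHI being offset 0, as asserted above.
 */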

/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar	#StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar	#StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */
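
/*
 * Ordering note: the TTE data is stored first, then a membar
 * #StoreStore, and only then the tag.  Because the tag stays at
 * TSBTAG_LOCKED until that final store, a concurrent TSB probe can
 * never observe a valid-looking tag paired with stale TTE data.
 */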

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte; TSB_INVALIDATE will spin until the entry is unlocked.	;\
	 * Note that we always invalidate the hash table before we	;\
	 * unload the TSB.						;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	TTE_CLR_SOFTEXEC_ML(tte)					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte; TSB_INVALIDATE will spin until the entry is unlocked.	;\
	 * Note that we always invalidate the hash table before we	;\
	 * unload the TSB.						;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */
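
/*
 * The brgez above exploits the TTE layout: the valid bit is the sign
 * bit of the 64-bit TTE, so a TTE that tests >= 0 is invalid.  In that
 * case the entry is simply unlocked again (tag set to TSBTAG_INVALID)
 * rather than inserted.
 */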

/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte; TSB_INVALIDATE will spin until the entry is unlocked.	;\
	 * Note that we always invalidate the hash table before we	;\
	 * unload the TSB.  Or the 4M pfn offset into the TTE and set	;\
	 * the exec_perm bit to 0 and the exec_synth bit to 1.		;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1						;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte				;\
	or	tte, TTE_E_SYNTH_INT, tte				;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp1 = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn
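
/*
 * The masks follow from the Panther page sizes: a 32M page covers
 * 32M/4M == 8 constituent 4M pages (3 bits, mask 0x7), and a 256M page
 * covers 256M/4M == 64 of them (6 bits, mask 0x3f).  The selected bits
 * are the index of the 4M page within the large page, shifted back into
 * pfn position by MMU_PAGESHIFT4M.
 */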

/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp1 = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits.					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TTE_CLR_SOFTEXEC_ML(tteva)					;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 *	 about this earlier to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */ ;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */

#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is hot patched, no value is passed in.  Thus we are
 * essentially implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */
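
/*
 * Pairing note: sfmmu_fixup_setx() later in this file rewrites the
 * immediate fields of this sequence (the imm22 of the two sethi
 * instructions and the imm13 of the two or instructions) once the real
 * 64-bit value is known.
 */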

#endif /* lint */


#if defined (lint)

/*
 * sfmmu related subroutines
 */
uint_t
sfmmu_disable_intrs()
{ return(0); }

/* ARGSUSED */
void
sfmmu_enable_intrs(uint_t pstate_save)
{}

/* ARGSUSED */
int
sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
{ return(0); }

/*
 * Use cas; if the tte has changed underneath us then reread it and
 * try again.  In the case of a retry, sttep is updated with the new
 * original.
 */
/* ARGSUSED */
int
sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }
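
/*
 * A rough C sketch of the retry loop implemented in assembly below (the
 * cas64() helper is hypothetical shorthand for the casx instruction):
 *
 *	orig = *sttep;
 *	for (;;) {
 *		current = *dttep;
 *		if (*stmodttep == current) {
 *			*sttep = current;	// nothing to write
 *			break;
 *		}
 *		if (cas64(dttep, orig, *stmodttep) == orig)
 *			break;			// cas succeeded
 *		orig = *dttep;			// reread and try again
 *		*sttep = orig;			// report new original
 *	}
 */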

/*
 * Use cas; returns 1 if the cas succeeds, -1 if it fails, and 0 if no
 * write was needed because the modified tte already equals the current
 * one.
 */
/* ARGSUSED */
int
sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/* ARGSUSED */
void
sfmmu_copytte(tte_t *sttep, tte_t *dttep)
{}

/*ARGSUSED*/
struct tsbe *
sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
{ return(0); }

/*ARGSUSED*/
uint64_t
sfmmu_make_tsbtag(caddr_t va)
{ return(0); }

#else	/* lint */

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"

	.global	sfmmu_panic6
sfmmu_panic6:
	.asciz	"sfmmu_asm: interrupts not disabled"

	.global	sfmmu_panic7
sfmmu_panic7:
	.asciz	"sfmmu_asm: kernel as"

	.global	sfmmu_panic8
sfmmu_panic8:
	.asciz	"sfmmu_asm: gnum is zero"

	.global	sfmmu_panic9
sfmmu_panic9:
	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"

	.global	sfmmu_panic10
sfmmu_panic10:
	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"

	ENTRY(sfmmu_disable_intrs)
	rdpr	%pstate, %o0
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
#endif /* DEBUG */
	retl
	  wrpr	%o0, PSTATE_IE, %pstate
	SET_SIZE(sfmmu_disable_intrs)

	ENTRY(sfmmu_enable_intrs)
	retl
	  wrpr	%g0, %o0, %pstate
	SET_SIZE(sfmmu_enable_intrs)

/*
 * This routine is called both by resume() and sfmmu_get_ctx() to
 * allocate a new context for the process on an MMU.
 * If allocflag == 1, then allocate a ctx when the HAT mmu cnum is
 * INVALID.  If allocflag == 0, then do not allocate a ctx if the HAT
 * mmu cnum is INVALID, which is the case when sfmmu_alloc_ctx is
 * called from resume().
 *
 * The caller must disable interrupts before entering this routine.
 * To reduce ctx switch overhead, the code contains both 'fast path' and
 * 'slow path' code. The fast path code covers the common case where only
 * a quick check is needed and the real ctx allocation is not required.
 * It can be done without holding the per-process (PP) lock.
 * The 'slow path' code must be protected by the PP lock and performs the
 * ctx allocation.
 * The hardware context register and the HAT mmu cnum are updated
 * accordingly.
 *
 * %o0 - sfmmup
 * %o1 - allocflag
 * %o2 - CPU
 * %o3 - sfmmu private/shared flag
 *
 * ret - 0: no ctx is allocated
 *       1: a ctx is allocated
 */
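/*
 * A condensed pseudo-C sketch of the flow (hedged: the assembly below
 * is authoritative, and the locking/membar details are elided):
 *
 *	gnum = mmu_ctxp->gnum;
 *	(hat_gnum, hat_cnum) = sfmmup->sfmmu_ctxs[mmu_idx];
 *	if (hat_cnum != INVALID_CONTEXT && hat_gnum == gnum)
 *		ret = 1;				// fast path
 *	else if (hat_cnum == INVALID_CONTEXT && allocflag == 0)
 *		ret = 0;				// resume(): no alloc
 *	else {
 *		lock(&sfmmup->sfmmu_ctx_lock);		// slow path
 *		// recheck; if still invalid or stale:
 *		cnum = atomic_inc(&mmu_ctxp->cnum);	// bails at nctxs
 *		sfmmup->sfmmu_ctxs[mmu_idx] = (gnum, cnum);
 *		unlock(&sfmmup->sfmmu_ctx_lock);
 *		ret = (cnum valid) ? 1 : 0;
 *	}
 *	SET_SECCTX(...);			// program the ctx register
 *	return (ret);
 */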
	ENTRY_NP(sfmmu_alloc_ctx)

#ifdef DEBUG
	sethi	%hi(ksfmmup), %g1
	ldx	[%g1 + %lo(ksfmmup)], %g1
	cmp	%g1, %o0
	bne,pt	%xcc, 0f
	  nop

	sethi	%hi(panicstr), %g1		! if kernel as, panic
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 7f
	  nop

	sethi	%hi(sfmmu_panic7), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic7), %o0

7:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0

0:
	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
#endif /* DEBUG */

	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1

	! load global mmu_ctxp info
	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr
	lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index

	! load global mmu_ctxp gnum
	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum

#ifdef DEBUG
	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
	bne,pt	%xcc, 3f
	  nop

	sethi	%hi(panicstr), %g1	! test if panicstr is already set
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 1f
	  nop

	sethi	%hi(sfmmu_panic8), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic8), %o0
1:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0
3:
#endif

	! load HAT sfmmu_ctxs[mmuid] gnum, cnum

	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS

	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 *
	 * Fast path code, do a quick check.
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 8f		! allocflag == 0, skip ctx allocation, bail
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 2f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 8f			! gnum unchanged, go to done
	  mov	%g6, %o1

2:
	/*
	 * Grab the per-process (PP) sfmmu_ctx_lock spinlock,
	 * followed by the 'slow path' code.
	 */
	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
3:
	brz	%g3, 5f
	  nop
4:
	brnz,a,pt	%g3, 4b				! spin if lock is 1
	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
	ba	%xcc, 3b				! retry the lock
	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = PP lock

5:
	membar	#LoadLoad
	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 6f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 2f			! gnum unchanged, go to done
	  mov	%g6, %o1

	ba,pt	%icc, 6f
	  nop
2:
	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
6:
	/*
	 * We get here if we do not have a valid context, or
	 * the HAT gnum does not match the global gnum. We hold
	 * the sfmmu_ctx_lock spinlock. Allocate that context.
	 *
	 * %o3 = mmu_ctxp
	 */
	add	%o3, MMU_CTX_CNUM, %g3
	ld	[%o3 + MMU_CTX_NCTXS], %g4

	/*
	 * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
	 * %g3 = mmu cnum address
	 * %g4 = mmu nctxs
	 *
	 * %o0 = sfmmup
	 * %o1 = mmu current cnum value (used as new cnum)
	 * %o4 = mmu gnum
	 *
	 * %o5 = scratch
	 */
	ld	[%g3], %o1
0:
	cmp	%o1, %g4
	bl,a,pt %icc, 1f
	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1

	/*
	 * cnum has reached its max; bail so that a wrap around can be
	 * performed later.
	 */
	set	INVALID_CONTEXT, %o1
	mov	%g0, %g4		! %g4 = ret = 0

	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
1:
	! %g3 = addr of mmu_ctxp->cnum
	! %o5 = mmu_ctxp->cnum + 1
	cas	[%g3], %o1, %o5
	cmp	%o1, %o5
	bne,a,pn %xcc, 0b	! cas failed
	  ld	[%g3], %o1

#ifdef DEBUG
	set	MAX_SFMMU_CTX_VAL, %o5
	cmp	%o1, %o5
	ble,pt	%icc, 2f
	  nop

	sethi	%hi(sfmmu_panic9), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic9), %o0
2:
#endif
	! update hat gnum and cnum
	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
	or	%o4, %o1, %o4
	stx	%o4, [%g2 + SFMMU_CTXS]

	membar	#LoadStore|#StoreStore
	clrb	[%o0 + SFMMU_CTX_LOCK]

	mov	1, %g4			! %g4 = ret = 1
8:
	/*
	 * Program the secondary context register.
	 *
	 * %o1 = cnum
	 * %g1 = sfmmu private/shared flag (0:private, 1:shared)
	 */

	/*
	 * When we come here and the context is invalid, we want to set both
	 * the private and shared ctx regs to INVALID. In order to
	 * do so, we set the sfmmu priv/shared flag to 'private' regardless,
	 * so that the private ctx reg will be set to invalid.
	 * Note that on sun4v values written to the private context register
	 * are automatically written to the corresponding shared context
	 * register as well. On sun4u SET_SECCTX() will invalidate the shared
	 * context register when it sets a private secondary context register.
	 */

	cmp	%o1, INVALID_CONTEXT
	be,a,pn	%icc, 9f
	  clr	%g1
9:

#ifdef	sun4u
	ldub	[%o0 + SFMMU_CEXT], %o2
	sll	%o2, CTXREG_EXT_SHIFT, %o2
	or	%o1, %o2, %o1
#endif /* sun4u */

	SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)

	retl
	  mov	%g4, %o0			! %o0 = ret

	SET_SIZE(sfmmu_alloc_ctx)

	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
2:
	ldx	[%o1], %g2			/* modified */
	cmp	%g2, %g3			/* is modified = current? */
	be,a,pt	%xcc, 1f			/* yes, don't write */
	stx	%g3, [%o0]			/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f			/* cas succeeded - return */
	  nop
	ldx	[%o2], %g3			/* new current */
	stx	%g3, [%o0]			/* save as new original */
	ba,pt	%xcc, 2b
	  mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)

	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2			/* modified */
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
	cmp	%g3, %g2			/* is modified = current? */
	be,a,pn %xcc, 1f			/* yes, don't write */
	mov	0, %o1				/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1			/* cas failed. */
	move	%xcc, 1, %o1			/* cas succeeded. */
1:
	stx	%g2, [%o0]			/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

#endif /* lint */

/*
 * Other sfmmu primitives
 */


#if defined (lint)
void
sfmmu_patch_ktsb(void)
{
}

void
sfmmu_kpm_patch_tlbm(void)
{
}

void
sfmmu_kpm_patch_tsbm(void)
{
}

void
sfmmu_patch_shctx(void)
{
}

/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
{
}

/* ARGSUSED */
void
sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
{
}

/* ARGSUSED */
void
sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
{
}

/* ARGSUSED */
void
sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
{
}

#else /* lint */

#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 32:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as follows:
	 *
	 * sethi   %hh(RUNTIME_PATCH), tmp
	 * sethi   %lm(RUNTIME_PATCH), dest
	 * or      tmp, %hm(RUNTIME_PATCH), tmp
	 * or      dest, %lo(RUNTIME_PATCH), dest
	 * sllx    tmp, 32, tmp
	 * nop
	 * or      tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)

	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to setup, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	  srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	  or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	  add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)
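
/*
 * Encoding note: the filter above keys off the SPARC instruction
 * layout -- a load/store-alternate has op (bits 31:30) == 3 and, for
 * ldda, op3 (bits 24:19) == 0x13.  The imm_asi field occupies bits
 * 12:5, which is why sfmmu_patch_mmu_asi() below shifts the ASI value
 * left by 5 before handing it over in %o1.
 */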

	/*
	 * Patch the immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
	movrz	%o0, ASI_NQUAD_LD, %o3
	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	  or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	  or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	  or	%o0, %lo(iktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
	  or	%o0, %lo(iktsb4m), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)

	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	  nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(iktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb4m), %o0

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif /* !sun4v */

	ldx	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(iktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(iktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)

	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)

	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	flush	%o0
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)

	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	lduw	[%o1], %o3
	subcc	%o4, %o3, %o4
	bz,pt	%icc, 1f
	  /* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)

	ENTRY_NP(sfmmu_patch_shctx)
#ifdef sun4u
	retl
	  nop
#else /* sun4u */
	set	sfmmu_shctx_cpu_mondo_patch, %o0
	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp	%g5
	st	%o1, [%o0]
	flush	%o0
	MAKE_NOP_INSTR(%o1)
	add	%o0, I_SIZE, %o0	! next instr
	st	%o1, [%o0]
	flush	%o0

	set	sfmmu_shctx_user_rtt_patch, %o0
	st	%o1, [%o0]		! nop 1st instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 2nd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 3rd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 4th instruction
	retl
	flush	%o0
#endif /* sun4u */
	SET_SIZE(sfmmu_patch_shctx)

	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, 1)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_load_tsbe)

	/*
	 * Flush a given entry from the TSB if its tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_unload_tsbe)

	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since the kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, 1)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	  membar #StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_load_tsb)

	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg. Locking is required since
	 * the kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	  membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_unload_tsb)

#endif /* lint */


#if defined (lint)

/*ARGSUSED*/
pfn_t
sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
{ return(0); }

#else /* lint */

	ENTRY_NP(sfmmu_ttetopfn)
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0
	SET_SIZE(sfmmu_ttetopfn)

#endif /* !lint */


#if defined (lint)
/*
 * sfmmu_hblk_hash_add is the assembly primitive for adding hmeblks to
 * the hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa)
{
}

/*
 * sfmmu_hblk_hash_rm is the assembly primitive to remove hmeblks from
 * the hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa, struct hme_blk *prev_hblkp)
{
}
#else /* lint */

/*
 * Functions to grab/release the hme bucket list lock.  We only use a byte
 * instead of the whole int because eventually we might want to
 * put some counters on the other bytes (of course, these routines would
 * have to change).  The code that grabs this lock should execute
 * with interrupts disabled and hold the lock for the least amount of time
 * possible.
 */

/*
 * Even though hmeh_listlock is updated using pa there's no need to flush
 * dcache since hmeh_listlock will be restored to the original value (0)
 * before interrupts are reenabled.
 */

/*
 * For sparcv9 hme hash buckets may not be in the nucleus.  hme hash update
 * routines still use virtual addresses to update the bucket fields. But they
 * must not cause a TLB miss after grabbing the low level bucket lock. To
 * achieve this we must make sure the bucket structure is completely within an
 * 8K page.
 */

#if (HMEBUCK_SIZE & (HMEBUCK_SIZE - 1))
#error - the size of hmehash_bucket structure is not a power of 2
#endif

/*
 * Enable backoff to significantly reduce locking overhead and to reduce
 * the chance of xcall timeout. This is only enabled for sun4v as a
 * Makefile compile-time option.
 * The rd %ccr is better for performance than a non-pipeline-releasing
 * tight spin on N2/VF.
 * The backoff-based fix is a temporary solution and doesn't allow scaling
 * above the lock saturation point. The final fix is to eliminate
 * HMELOCK_ENTER() to avoid xcall timeouts and improve GET_TTE()
 * performance.
 */

#ifdef HMELOCK_BACKOFF_ENABLE

#define	HMELOCK_BACKOFF(reg, val)				\
	set	val, reg					;\
	rd	%ccr, %g0					;\
	brnz	reg, .-4					;\
	dec	reg

#define	CAS_HME(tmp1, tmp2, exitlabel, asi)			\
	mov	0xff, tmp2					;\
	casa	[tmp1]asi, %g0, tmp2				;\
	brz,a,pt tmp2, exitlabel				;\
	membar	#LoadLoad

#define	HMELOCK_ENTER(hmebp, tmp1, tmp2, label, asi)		\
	mov	0xff, tmp2					;\
	add	hmebp, HMEBUCK_LOCK, tmp1			;\
	casa	[tmp1]asi, %g0, tmp2				;\
	brz,a,pt tmp2, label/**/2				;\
	membar	#LoadLoad					;\
	HMELOCK_BACKOFF(tmp2,0x8)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x10)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x20)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x40)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x80)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
label/**/1:							;\
	HMELOCK_BACKOFF(tmp2,0x100)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x200)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x400)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x800)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x1000)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x2000)				;\
	mov	0xff, tmp2					;\
	casa	[tmp1]asi, %g0, tmp2				;\
	brnz,pn	tmp2, label/**/1     /* reset backoff */	;\
	membar	#LoadLoad					;\
label/**/2:

#else /* HMELOCK_BACKOFF_ENABLE */

#define	HMELOCK_ENTER(hmebp, tmp1, tmp2, label1, asi)		\
	mov	0xff, tmp2					;\
	add	hmebp, HMEBUCK_LOCK, tmp1			;\
label1:								;\
	casa	[tmp1]asi, %g0, tmp2				;\
	brnz,pn	tmp2, label1					;\
	mov	0xff, tmp2					;\
	membar	#LoadLoad

#endif /* HMELOCK_BACKOFF_ENABLE */

#define	HMELOCK_EXIT(hmebp, tmp1, asi)				\
	membar	#LoadStore|#StoreStore				;\
	add	hmebp, HMEBUCK_LOCK, tmp1			;\
	sta	%g0, [tmp1]asi
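
/*
 * The lock protocol in rough pseudo-C (a sketch; the one-byte lock
 * word lives at offset HMEBUCK_LOCK in the bucket, and casa() stands
 * in for the casa instruction):
 *
 *	while (casa(&hmebp->lock, 0, 0xff) != 0)
 *		;			// spin, with backoff if enabled
 *	membar #LoadLoad;
 *	... critical section ...
 *	membar #LoadStore|#StoreStore;
 *	hmebp->lock = 0;		// HMELOCK_EXIT
 */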

	.seg	".data"
hblk_add_panic1:
	.ascii	"sfmmu_hblk_hash_add: interrupts disabled"
	.byte	0
hblk_add_panic2:
	.ascii	"sfmmu_hblk_hash_add: va hmeblkp is NULL but pa is not"
	.byte	0
	.align	4
	.seg	".text"

	ENTRY_NP(sfmmu_hblk_hash_add)
	/*
	 * %o0 = hmebp
	 * %o1 = hmeblkp
	 * %o2 = hblkpa
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt	%icc, 3f			/* disabled, panic	 */
	  nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic1), %o0
	call	panic
	 or	%o0, %lo(hblk_add_panic1), %o0
	ret
	restore

3:
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
	mov	%o2, %g1

	/*
	 * g1 = hblkpa
	 */
	ldn	[%o0 + HMEBUCK_HBLK], %o4	/* next hmeblk */
	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = next hblkpa */
#ifdef	DEBUG
	cmp	%o4, %g0
	bne,pt	%xcc, 1f
	 nop
	brz,pt	%g2, 1f
	 nop
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic2), %o0
	call	panic
	  or	%o0, %lo(hblk_add_panic2), %o0
	ret
	restore
1:
#endif /* DEBUG */
	/*
	 * We update the hmeblk's entries before grabbing the lock because
	 * the stores could take a tlb miss and require the hash lock.  The
	 * buckets are part of the nucleus so we are cool with those stores.
	 *
	 * If the buckets are not part of the nucleus, our game is to
	 * not touch any other page via va until we drop the lock.
	 * This guarantees we won't get a tlb miss before the lock release
	 * since interrupts are disabled.
	 */
	stn	%o4, [%o1 + HMEBLK_NEXT]	/* update hmeblk's next */
	stx	%g2, [%o1 + HMEBLK_NEXTPA]	/* update hmeblk's next pa */
	HMELOCK_ENTER(%o0, %o2, %o3, hashadd1, ASI_N)
	stn	%o1, [%o0 + HMEBUCK_HBLK]	/* update bucket hblk next */
	stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* add hmeblk to list */
	HMELOCK_EXIT(%o0, %g2, ASI_N)
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_hblk_hash_add)
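
/*
 * In outline (a sketch using the assym offset names): the new hmeblk is
 * linked at the head of the bucket's chain on both the VA and PA lists,
 * with the hmeblk's own forward pointers written before the lock is
 * taken:
 *
 *	hmeblkp->HMEBLK_NEXT   = hmebp->HMEBUCK_HBLK;	// before lock
 *	hmeblkp->HMEBLK_NEXTPA = hmebp->HMEBUCK_NEXTPA;
 *	HMELOCK_ENTER(hmebp);
 *	hmebp->HMEBUCK_HBLK    = hmeblkp;
 *	hmebp->HMEBUCK_NEXTPA  = hblkpa;
 *	HMELOCK_EXIT(hmebp);
 */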
1745
1746	ENTRY_NP(sfmmu_hblk_hash_rm)
1747	/*
1748	 * This function removes an hmeblk from the hash chain.
1749	 * It is written to guarantee we don't take a tlb miss
1750	 * by using physical addresses to update the list.
1751	 *
1752	 * %o0 = hmebp
1753	 * %o1 = hmeblkp
1754	 * %o2 = hmeblkp previous pa
1755	 * %o3 = hmeblkp previous
1756	 */
1757
1758	mov	%o3, %o4			/* o4 = hmeblkp previous */
1759
1760	rdpr	%pstate, %o5
1761#ifdef DEBUG
1762	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l4, %g1)
1763#endif /* DEBUG */
1764	/*
1765	 * disable interrupts, clear Address Mask to access 64 bit physaddr
1766	 */
1767	andn    %o5, PSTATE_IE, %g1
1768	wrpr    %g1, 0, %pstate
1769
1770#ifndef sun4v
1771	sethi   %hi(dcache_line_mask), %g4
1772	ld      [%g4 + %lo(dcache_line_mask)], %g4
1773#endif /* sun4v */
1774
1775	/*
1776	 * if buckets are not part of the nucleus our game is to
1777	 * not touch any other page via va until we drop the lock.
1778	 * This guarantees we won't get a tlb miss before the lock release
1779	 * since interrupts are disabled.
1780	 */
1781	HMELOCK_ENTER(%o0, %g1, %g3, hashrm1, ASI_N)
1782	ldn	[%o0 + HMEBUCK_HBLK], %g2	/* first hmeblk in list */
1783	cmp	%g2, %o1
1784	bne,pt	%ncc,1f
1785	 mov	ASI_MEM, %asi
1786	/*
1787	 * hmeblk is first on list
1788	 */
1789	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = hmeblk pa */
1790	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
1791	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
1792	stn	%o3, [%o0 + HMEBUCK_HBLK]	/* write va */
1793	ba,pt	%xcc, 2f
1794	  stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* write pa */
17951:
1796	/* hmeblk is not first on list */
1797
1798	mov	%o2, %g3
1799#ifndef sun4v
1800	GET_CPU_IMPL(%g2)
1801	cmp 	%g2, CHEETAH_IMPL
1802	bge,a,pt %icc, hblk_hash_rm_1
1803	  and	%o4, %g4, %g2
1804	cmp	%g2, SPITFIRE_IMPL
1805	blt	%icc, hblk_hash_rm_2		/* no flushing needed for OPL */
1806	  and	%o4, %g4, %g2
1807	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev pa from dcache */
1808	add	%o4, HMEBLK_NEXT, %o4
1809	and	%o4, %g4, %g2
1810	ba	hblk_hash_rm_2
1811	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev va from dcache */
1812hblk_hash_rm_1:
1813
1814	stxa	%g0, [%g3]ASI_DC_INVAL		/* flush prev pa from dcache */
1815	membar	#Sync
1816	add     %g3, HMEBLK_NEXT, %g2
1817	stxa	%g0, [%g2]ASI_DC_INVAL		/* flush prev va from dcache */
1818hblk_hash_rm_2:
1819	membar	#Sync
1820#endif /* sun4v */
1821	ldxa	[%g3 + HMEBLK_NEXTPA] %asi, %g2	/* g2 = hmeblk pa */
1822	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
1823	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
1824	stna	%o3, [%g3 + HMEBLK_NEXT] %asi	/* write va */
1825	stxa	%g1, [%g3 + HMEBLK_NEXTPA] %asi	/* write pa */
18262:
1827	HMELOCK_EXIT(%o0, %g2, ASI_N)
1828	retl
1829	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1830	SET_SIZE(sfmmu_hblk_hash_rm)
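
	/*
	 * A hedged C sketch of the unlink above (illustrative names; the
	 * real code reads and writes the hmeblk fields through physical
	 * addresses, and sun4u also flushes the previous hmeblk's lines
	 * from the d-cache first):
	 *
	 *	hmelock_enter(hmebp);
	 *	if (hmebp->hmeblkp == hmeblkp) {	// first on list
	 *		hmebp->hmeblkp     = hmeblkp->hblk_next;
	 *		hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
	 *	} else {				// splice around it
	 *		prev->hblk_next   = hmeblkp->hblk_next;
	 *		prev->hblk_nextpa = hmeblkp->hblk_nextpa;
	 *	}
	 *	hmelock_exit(hmebp);
	 */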
1831
1832#endif /* lint */
1833
1834/*
1835 * These macros are used to update global sfmmu hme hash statistics
1836 * in perf critical paths.  They are only enabled in debug kernels or
1837 * when SFMMU_STAT_GATHER is defined.
1838 */
1839#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1840#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1841	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1842	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
1843	cmp	tmp1, hatid						;\
1844	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
1845	set	sfmmu_global_stat, tmp1					;\
1846	add	tmp1, tmp2, tmp1					;\
1847	ld	[tmp1], tmp2						;\
1848	inc	tmp2							;\
1849	st	tmp2, [tmp1]
1850
1851#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1852	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1853	mov	HATSTAT_KHASH_LINKS, tmp2				;\
1854	cmp	tmp1, hatid						;\
1855	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
1856	set	sfmmu_global_stat, tmp1					;\
1857	add	tmp1, tmp2, tmp1					;\
1858	ld	[tmp1], tmp2						;\
1859	inc	tmp2							;\
1860	st	tmp2, [tmp1]
1861
1862
1863#else /* DEBUG || SFMMU_STAT_GATHER */
1864
1865#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1866
1867#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1868
1869#endif  /* DEBUG || SFMMU_STAT_GATHER */
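
/*
 * A hedged C sketch of what HAT_HSEARCH_DBSTAT expands to (illustrative
 * names): pick the kernel or user counter by comparing hatid with KHATID,
 * then bump that 32-bit word inside sfmmu_global_stat:
 *
 *	off = (tsbarea->tsbmiss_khatid == hatid) ?
 *	    HATSTAT_KHASH_SEARCH : HATSTAT_UHASH_SEARCH;
 *	(*(uint32_t *)((caddr_t)&sfmmu_global_stat + off))++;
 *
 * HAT_HLINK_DBSTAT is identical with the *_HASH_LINKS offsets.
 */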
1870
1871/*
1872 * This macro is used to update global sfmmu kstats in non
1873 * perf critical areas, so it is enabled all the time.
1874 */
1875#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
1876	sethi	%hi(sfmmu_global_stat), tmp1				;\
1877	add	tmp1, statname, tmp1					;\
1878	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
1879	inc	tmp2							;\
1880	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]
1881
1882/*
1883 * This macro is used to update a 32-bit per cpu stat in non perf
1884 * critical areas, so it is enabled all the time.
1885 */
1886#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
1887	ld	[tsbarea + stat], tmp1					;\
1888	inc	tmp1							;\
1889	st	tmp1, [tsbarea + stat]
1890
1891/*
1892 * This macro is used to update a 16-bit per cpu stat in non perf
1893 * critical areas, so it is enabled all the time.
1894 */
1895#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
1896	lduh	[tsbarea + stat], tmp1					;\
1897	inc	tmp1							;\
1898	stuh	tmp1, [tsbarea + stat]
1899
1900#if defined(KPM_TLBMISS_STATS_GATHER)
1901	/*
1902	 * Count kpm dtlb misses separately to allow a different
1903	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
1904	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
1905	 */
1906#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
1907	brgez	tagacc, label	/* KPM VA? */				;\
1908	nop								;\
1909	CPU_INDEX(tmp1, tsbma)						;\
1910	sethi	%hi(kpmtsbm_area), tsbma				;\
1911	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
1912	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
1913	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
1914	/* VA range check */						;\
1915	ldx	[tsbma + KPMTSBM_VBASE], val				;\
1916	cmp	tagacc, val						;\
1917	blu,pn	%xcc, label						;\
1918	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
1919	cmp	tagacc, tmp1						;\
1920	bgeu,pn	%xcc, label						;\
1921	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
1922	inc	val							;\
1923	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
1924label:
1925#else
1926#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1927#endif	/* KPM_TLBMISS_STATS_GATHER */
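
/*
 * A hedged C sketch of KPM_TLBMISS_STAT_INCR (illustrative names): only
 * negative VAs can be kpm, and only those inside this cpu's kpm range
 * are counted:
 *
 *	if ((int64_t)tagacc < 0) {			// kpm VA?
 *		kpmtsbm = &kpmtsbm_area[CPU->cpu_id];
 *		if (tagacc >= kpmtsbm->vbase && tagacc < kpmtsbm->vend)
 *			kpmtsbm->dtlbmiss++;		// 32-bit counter
 *	}
 */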
1928
1929#if defined (lint)
1930/*
1931 * The following routines are jumped to from the mmu trap handlers to
1932 * set up the call to systrap.  They are separate routines instead of
1933 * being part of the handlers because the handlers would exceed 32
1934 * instructions; since this is part of the slow path, the jump
1935 * cost is irrelevant.
1936 */
1937void
1938sfmmu_pagefault(void)
1939{
1940}
1941
1942void
1943sfmmu_mmu_trap(void)
1944{
1945}
1946
1947void
1948sfmmu_window_trap(void)
1949{
1950}
1951
1952void
1953sfmmu_kpm_exception(void)
1954{
1955}
1956
1957#else /* lint */
1958
1959#ifdef	PTL1_PANIC_DEBUG
1960	.seg	".data"
1961	.global	test_ptl1_panic
1962test_ptl1_panic:
1963	.word	0
1964	.align	8
1965
1966	.seg	".text"
1967	.align	4
1968#endif	/* PTL1_PANIC_DEBUG */
1969
1970
1971	ENTRY_NP(sfmmu_pagefault)
1972	SET_GL_REG(1)
1973	USE_ALTERNATE_GLOBALS(%g5)
1974	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
1975	rdpr	%tt, %g6
1976	cmp	%g6, FAST_IMMU_MISS_TT
1977	be,a,pn	%icc, 1f
1978	  mov	T_INSTR_MMU_MISS, %g3
1979	cmp	%g6, T_INSTR_MMU_MISS
1980	be,a,pn	%icc, 1f
1981	  mov	T_INSTR_MMU_MISS, %g3
1982	mov	%g5, %g2
1983	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
1984	cmp	%g6, FAST_DMMU_MISS_TT
1985	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1986	cmp	%g6, T_DATA_MMU_MISS
1987	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1988
1989#ifdef  PTL1_PANIC_DEBUG
1990	/* check if we want to test the tl1 panic */
1991	sethi	%hi(test_ptl1_panic), %g4
1992	ld	[%g4 + %lo(test_ptl1_panic)], %g1
1993	st	%g0, [%g4 + %lo(test_ptl1_panic)]
1994	cmp	%g1, %g0
1995	bne,a,pn %icc, ptl1_panic
1996	  or	%g0, PTL1_BAD_DEBUG, %g1
1997#endif	/* PTL1_PANIC_DEBUG */
19981:
1999	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
2000	/*
2001	 * g2 = tag access reg
2002	 * g3.l = type
2003	 * g3.h = 0
2004	 */
2005	sethi	%hi(trap), %g1
2006	or	%g1, %lo(trap), %g1
20072:
2008	ba,pt	%xcc, sys_trap
2009	  mov	-1, %g4
2010	SET_SIZE(sfmmu_pagefault)
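
	/*
	 * The trap type selection above, as a hedged C sketch (sys_trap's
	 * register protocol here is g1 = TL0 handler, g2 = tag access,
	 * g3 = trap type, g4 = PIL):
	 *
	 *	if (tt == FAST_IMMU_MISS_TT || tt == T_INSTR_MMU_MISS)
	 *		type = T_INSTR_MMU_MISS;	// use the itag access
	 *	else if (tt == FAST_DMMU_MISS_TT || tt == T_DATA_MMU_MISS)
	 *		type = T_DATA_MMU_MISS;		// use the dtag access
	 *	else
	 *		type = T_DATA_PROT;		// default
	 *	sys_trap(trap, tagacc, type, -1 /* PIL */);
	 */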
2011
2012	ENTRY_NP(sfmmu_mmu_trap)
2013	SET_GL_REG(1)
2014	USE_ALTERNATE_GLOBALS(%g5)
2015	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
2016	rdpr	%tt, %g6
2017	cmp	%g6, FAST_IMMU_MISS_TT
2018	be,a,pn	%icc, 1f
2019	  mov	T_INSTR_MMU_MISS, %g3
2020	cmp	%g6, T_INSTR_MMU_MISS
2021	be,a,pn	%icc, 1f
2022	  mov	T_INSTR_MMU_MISS, %g3
2023	mov	%g5, %g2
2024	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
2025	cmp	%g6, FAST_DMMU_MISS_TT
2026	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
2027	cmp	%g6, T_DATA_MMU_MISS
2028	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
20291:
2030	/*
2031	 * g2 = tag access reg
2032	 * g3 = type
2033	 */
2034	sethi	%hi(sfmmu_tsbmiss_exception), %g1
2035	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
2036	ba,pt	%xcc, sys_trap
2037	  mov	-1, %g4
2038	/*NOTREACHED*/
2039	SET_SIZE(sfmmu_mmu_trap)
2040
2041	ENTRY_NP(sfmmu_suspend_tl)
2042	SET_GL_REG(1)
2043	USE_ALTERNATE_GLOBALS(%g5)
2044	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
2045	rdpr	%tt, %g6
2046	cmp	%g6, FAST_IMMU_MISS_TT
2047	be,a,pn	%icc, 1f
2048	  mov	T_INSTR_MMU_MISS, %g3
2049	mov	%g5, %g2
2050	cmp	%g6, FAST_DMMU_MISS_TT
2051	move	%icc, T_DATA_MMU_MISS, %g3
2052	movne	%icc, T_DATA_PROT, %g3
20531:
2054	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
2055	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
2056	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
2057	ba,pt	%xcc, sys_trap
2058	  mov	PIL_15, %g4
2059	/*NOTREACHED*/
2060	SET_SIZE(sfmmu_suspend_tl)
2061
2062	/*
2063	 * No %g registers in use at this point.
2064	 */
2065	ENTRY_NP(sfmmu_window_trap)
2066	rdpr	%tpc, %g1
2067#ifdef sun4v
2068#ifdef DEBUG
2069	/* We assume previous %gl was 1 */
2070	rdpr	%tstate, %g4
2071	srlx	%g4, TSTATE_GL_SHIFT, %g4
2072	and	%g4, TSTATE_GL_MASK, %g4
2073	cmp	%g4, 1
2074	bne,a,pn %icc, ptl1_panic
2075	  mov	PTL1_BAD_WTRAP, %g1
2076#endif /* DEBUG */
2077	/* user miss at tl>1. better be the window handler or user_rtt */
2078	/* in user_rtt? */
2079	set	rtt_fill_start, %g4
2080	cmp	%g1, %g4
2081	blu,pn %xcc, 6f
2082	 .empty
2083	set	rtt_fill_end, %g4
2084	cmp	%g1, %g4
2085	bgeu,pn %xcc, 6f
2086	 nop
2087	set	fault_rtt_fn1, %g1
2088	wrpr	%g0, %g1, %tnpc
2089	ba,a	7f
20906:
2091	! must save this trap level before descending trap stack
2092	! no need to save %tnpc, either overwritten or discarded
2093	! already got it: rdpr	%tpc, %g1
2094	rdpr	%tstate, %g6
2095	rdpr	%tt, %g7
2096	! trap level saved, go get underlying trap type
2097	rdpr	%tl, %g5
2098	sub	%g5, 1, %g3
2099	wrpr	%g3, %tl
2100	rdpr	%tt, %g2
2101	wrpr	%g5, %tl
2102	! restore saved trap level
2103	wrpr	%g1, %tpc
2104	wrpr	%g6, %tstate
2105	wrpr	%g7, %tt
2106#else /* sun4v */
2107	/* user miss at tl>1. better be the window handler */
2108	rdpr	%tl, %g5
2109	sub	%g5, 1, %g3
2110	wrpr	%g3, %tl
2111	rdpr	%tt, %g2
2112	wrpr	%g5, %tl
2113#endif /* sun4v */
2114	and	%g2, WTRAP_TTMASK, %g4
2115	cmp	%g4, WTRAP_TYPE
2116	bne,pn	%xcc, 1f
2117	 nop
2118	/* tpc should be in the trap table */
2119	set	trap_table, %g4
2120	cmp	%g1, %g4
2121	blt,pn %xcc, 1f
2122	 .empty
2123	set	etrap_table, %g4
2124	cmp	%g1, %g4
2125	bge,pn %xcc, 1f
2126	 .empty
2127	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
2128	add	%g1, WTRAP_FAULTOFF, %g1
2129	wrpr	%g0, %g1, %tnpc
21307:
2131	/*
2132	 * Some wbuf handlers will call systrap to resolve the fault.
2133	 * We pass the trap type so they can figure out the correct parameters.
2134	 * g5 = trap type, g6 = tag access reg
2135	 */
2136
2137	/*
2138	 * Only use the g5, g6, g7 registers after we have switched to the
2139	 * alternate globals.
2140	 */
2141	SET_GL_REG(1)
2142	USE_ALTERNATE_GLOBALS(%g5)
2143	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
2144	rdpr	%tt, %g7
2145	cmp	%g7, FAST_IMMU_MISS_TT
2146	be,a,pn	%icc, ptl1_panic
2147	  mov	PTL1_BAD_WTRAP, %g1
2148	cmp	%g7, T_INSTR_MMU_MISS
2149	be,a,pn	%icc, ptl1_panic
2150	  mov	PTL1_BAD_WTRAP, %g1
2151	mov	T_DATA_PROT, %g5
2152	cmp	%g7, FAST_DMMU_MISS_TT
2153	move	%icc, T_DATA_MMU_MISS, %g5
2154	cmp	%g7, T_DATA_MMU_MISS
2155	move	%icc, T_DATA_MMU_MISS, %g5
2156	! XXXQ AGS re-check out this one
2157	done
21581:
2159	CPU_PADDR(%g1, %g4)
2160	add	%g1, CPU_TL1_HDLR, %g1
2161	lda	[%g1]ASI_MEM, %g4
2162	brnz,a,pt %g4, sfmmu_mmu_trap
2163	  sta	%g0, [%g1]ASI_MEM
2164	ba,pt	%icc, ptl1_panic
2165	  mov	PTL1_BAD_TRAP, %g1
2166	SET_SIZE(sfmmu_window_trap)
2167
2168	ENTRY_NP(sfmmu_kpm_exception)
2169	/*
2170	 * We have accessed an unmapped segkpm address or a legal segkpm
2171	 * address which is involved in a VAC alias conflict prevention.
2172	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
2173	 * set. If it is, we will instead note that a fault has occurred
2174	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
2175	 * a "retry"). This will step over the faulting instruction.
2176	 * Note that this means that a legal segkpm address involved in
2177	 * a VAC alias conflict prevention (a rare case to begin with)
2178	 * cannot be used in DTrace.
2179	 */
2180	CPU_INDEX(%g1, %g2)
2181	set	cpu_core, %g2
2182	sllx	%g1, CPU_CORE_SHIFT, %g1
2183	add	%g1, %g2, %g1
2184	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
2185	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
2186	bz	0f
2187	or	%g2, CPU_DTRACE_BADADDR, %g2
2188	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
2189	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
2190	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
2191	done
21920:
2193	TSTAT_CHECK_TL1(1f, %g1, %g2)
21941:
2195	SET_GL_REG(1)
2196	USE_ALTERNATE_GLOBALS(%g5)
2197	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
2198	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
2199	/*
2200	 * g2=tagacc g3.l=type g3.h=0
2201	 */
2202	sethi	%hi(trap), %g1
2203	or	%g1, %lo(trap), %g1
2204	ba,pt	%xcc, sys_trap
2205	  mov	-1, %g4
2206	SET_SIZE(sfmmu_kpm_exception)
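
	/*
	 * The DTrace check above, as a hedged C sketch (illustrative
	 * pseudo-code; the real code runs at TL>0 on the alternate
	 * globals and cannot call C):
	 *
	 *	flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	 *	if (*flags & CPU_DTRACE_NOFAULT) {
	 *		*flags |= CPU_DTRACE_BADADDR;
	 *		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = fault_addr;
	 *		done();		// step over the faulting instruction
	 *	} else {
	 *		sys_trap(trap, tagacc, T_DATA_MMU_MISS, -1);
	 *	}
	 */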
2207
2208#endif /* lint */
2209
2210#if defined (lint)
2211
2212void
2213sfmmu_tsb_miss(void)
2214{
2215}
2216
2217void
2218sfmmu_kpm_dtsb_miss(void)
2219{
2220}
2221
2222void
2223sfmmu_kpm_dtsb_miss_small(void)
2224{
2225}
2226
2227#else /* lint */
2228
2229#if (IMAP_SEG != 0)
2230#error - ism_map->ism_seg offset is not zero
2231#endif
2232
2233/*
2234 * Checks whether the vaddr passed in via tagacc lies within an ISM
2235 * segment of this process.  If it does, the ism mapping for this ctx
2236 * is copied into param "ismseg" and we branch to label "ismhit".
2237 *
2238 * If this is not an ISM process, or the miss is not against an ISM
2239 * segment, the macro falls through.
2240 *
2241 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
2242 * so that any other threads of this process will not try to walk the ism
2243 * maps while they are being changed.
2244 * maps while they are being changed.
2245 *
2246 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
2247 *       will make sure of that. This means we can terminate our search on
2248 *       the first zero mapping we find.
2249 *
2250 * Parameters:
2251 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
2252 * tsbmiss	= address of tsb miss area (in)
2253 * ismseg	= contents of ism_seg for this ism map (out)
2254 * ismhat	= physical address of imap_ismhat for this ism map (out)
2255 * tmp1		= scratch reg (CLOBBERED)
2256 * tmp2		= scratch reg (CLOBBERED)
2257 * tmp3		= scratch reg (CLOBBERED)
2258 * label:    temporary labels
2259 * ismhit:   label to jump to on an ism dtlb miss
2260 * (no exit label is taken; if there is no ism hit the macro falls thru)
2261 */
2262#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
2263	label, ismhit)							\
2264	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
2265	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
2266	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
2267label/**/1:								;\
2268	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
2269	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
2270label/**/2:								;\
2271	brz,pt  ismseg, label/**/3		/* no mapping */	;\
2272	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
2273	lduba	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
2274	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
2275	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
2276	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
2277	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
2278	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
2279	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
2280	cmp	tmp2, tmp1		 	/* check offset < size */ ;\
2281	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
2282	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
2283									;\
2284	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
2285	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
2286	cmp	ismhat, tmp1						;\
2287	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
2288	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
2289									;\
2290	add	tmp3, IBLK_NEXTPA, tmp1					;\
2291	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
2292	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
2293	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
2294label/**/3:
2295
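/*
 * A hedged C sketch of the walk ISM_CHECK performs (illustrative names;
 * the real loads go through physical addresses with ASI_MEM, and the
 * block list is terminated by a nextpa of -1):
 *
 *	for (blk = tsbmiss->ismblkpa; blk != -1; blk = blk->iblk_nextpa) {
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			map = &blk->iblk_maps[i];
 *			if (map->imap_seg == 0)
 *				goto out;	// no holes: search over
 *			seg   = vaddr >> map->imap_vb_shift;
 *			vbase = map->imap_seg >> map->imap_vb_shift;
 *			size  = map->imap_seg & map->imap_sz_mask;
 *			if (seg - vbase < size)	// unsigned compare
 *				goto ismhit;
 *		}
 *	}
 *   out:
 */
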
2296/*
2297 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
2298 * It also returns the virtual pg for vaddr (i.e. vaddr >> hmeshift).
2299 * Parameters:
2300 * tagacc = reg containing virtual address
2301 * hatid = reg containing sfmmu pointer
2302 * hmeshift = constant/register to shift vaddr to obtain vapg
2303 * hmebp = register where bucket pointer will be stored
2304 * vapg = register where virtual page will be stored
2305 * tsbarea = tsbmiss area; label = temporary label; tmp1, tmp2 = tmp regs
2306 */
2307
2308
2309#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
2310	vapg, label, tmp1, tmp2)					\
2311	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
2312	brnz,a,pt tmp1, label/**/1					;\
2313	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
2314	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
2315	ba,pt	%xcc, label/**/2					;\
2316	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
2317label/**/1:								;\
2318	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
2319label/**/2:								;\
2320	srlx	tagacc, hmeshift, vapg					;\
2321	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
2322	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
2323	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
2324	add	hmebp, tmp1, hmebp
2325
2326/*
2327 * hashtag includes bspage + hashno (64 bits).
2328 */
2329
2330#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
2331	sllx	vapg, hmeshift, vapg					;\
2332	mov	hashno, hblktag						;\
2333	sllx	hblktag, HTAG_REHASH_SHIFT, hblktag			;\
2334	or	vapg, hblktag, hblktag
2335
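/*
 * Together, HMEHASH_FUNC_ASM and MAKE_HASHTAG compute (hedged C sketch,
 * illustrative names; hash_base/hash_mask come from the kernel or user
 * fields of the tsbmiss area depending on the context bits of tagacc):
 *
 *	hmebp = hash_base +
 *	    ((hatid ^ (vaddr >> hmeshift)) & hash_mask) * HMEBUCK_SIZE;
 *
 *	hblktag = ((vaddr >> hmeshift) << hmeshift) |
 *	    (hashno << HTAG_REHASH_SHIFT);
 */
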
2336/*
2337 * Function to traverse the hmeblk hash link list and find a matching
2338 * hmeblk.  The search is done using physical pointers.  It returns the
2339 * physical and virtual address pointers to the hmeblk that matches the
2340 * tag provided.
2341 * Parameters:
2342 * hmebp	= register that points to hme hash bucket, also used as
2343 *		  tmp reg (clobbered)
2344 * hmeblktag	= register with hmeblk tag match
2345 * hatid	= register with hatid
2346 * hmeblkpa	= register where physical ptr will be stored
2347 * hmeblkva	= register where virtual ptr will be stored
2348 * tmp1		= tmp reg
2349 * label: temporary label
2350 */
2351
2352#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, hmeblkva,	\
2353	tsbarea, tmp1, label)					 	\
2354	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
2355	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2356	add     hmebp, HMEBUCK_HBLK, hmeblkva				;\
2357	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
2358	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2359label/**/1:								;\
2360	brz,pn	hmeblkva, label/**/2					;\
2361	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2362	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
2363	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2364	add	hmebp, CLONGSIZE, hmebp					;\
2365	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
2366	xor	tmp1, hmeblktag, tmp1					;\
2367	xor	hmebp, hatid, hmebp					;\
2368	or	hmebp, tmp1, hmebp					;\
2369	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
2370	  add	hmeblkpa, HMEBLK_NEXT, hmebp				;\
2371	ldna	[hmebp]ASI_MEM, hmeblkva	/* hmeblk ptr va */	;\
2372	add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
2373	ba,pt	%xcc, label/**/1					;\
2374	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
2375label/**/2:
2376
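/*
 * A hedged C sketch of the search loop above (illustrative helpers; the
 * tag is two longwords, compared with xor/or so one branch decides):
 *
 *	pa = hmebp->hmeh_nextpa;  va = hmebp->hmeblkp;
 *	while (va != NULL) {
 *		if (((tag_word0(pa) ^ hmeblktag) |
 *		    (tag_word1(pa) ^ (uintptr_t)hatid)) == 0)
 *			break;				// hit
 *		va = next_va(pa);  pa = next_pa(pa);
 *	}
 */
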
2377/*
2378 * Function to traverse the hmeblk hash link list and find a matching
2379 * hmeblk.  The search is done using physical pointers.  It returns the
2380 * physical and virtual address pointers to the hmeblk that matches the
2381 * tag provided.
2382 * Parameters:
2383 * hmeblktag	= register with hmeblk tag match (rid field is 0)
2384 * hatid	= register with hatid (pointer to SRD)
2385 * hmeblkpa	= register where physical ptr will be stored
2386 * hmeblkva	= register where virtual ptr will be stored
2387 * tmp1		= tmp reg
2388 * tmp2		= tmp reg
2389 * label: temporary label
2390 */
2391
2392#define	HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, hmeblkva,	\
2393	tsbarea, tmp1, tmp2, label)			 		\
2394label/**/1:								;\
2395	brz,pn	hmeblkva, label/**/4					;\
2396	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			;\
2397	add	hmeblkpa, HMEBLK_TAG, tmp2				;\
2398	ldxa	[tmp2]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2399	add	tmp2, CLONGSIZE, tmp2					;\
2400	ldxa	[tmp2]ASI_MEM, tmp2 	/* read 2nd part of tag */	;\
2401	xor	tmp1, hmeblktag, tmp1					;\
2402	xor	tmp2, hatid, tmp2					;\
2403	brz,pn	tmp2, label/**/3	/* branch on hit */		;\
2404	  add	hmeblkpa, HMEBLK_NEXT, tmp2				;\
2405label/**/2:								;\
2406	ldna	[tmp2]ASI_MEM, hmeblkva	/* hmeblk ptr va */		;\
2407	add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2408	ba,pt	%xcc, label/**/1					;\
2409	  ldxa	[tmp2]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */		;\
2410label/**/3:								;\
2411	cmp	tmp1, SFMMU_MAX_HME_REGIONS				;\
2412	bgeu,pt	%xcc, label/**/2					;\
2413	  add	hmeblkpa, HMEBLK_NEXT, tmp2				;\
2414	and	tmp1, BT_ULMASK, tmp2					;\
2415	srlx	tmp1, BT_ULSHIFT, tmp1					;\
2416	sllx	tmp1, CLONGSHIFT, tmp1					;\
2417	add	tsbarea, tmp1, tmp1					;\
2418	ldx	[tmp1 + TSBMISS_SHMERMAP], tmp1				;\
2419	srlx	tmp1, tmp2, tmp1					;\
2420	btst	0x1, tmp1						;\
2421	bz,pn	%xcc, label/**/2					;\
2422	  add	hmeblkpa, HMEBLK_NEXT, tmp2				;\
2423label/**/4:
2424
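/*
 * The extra check in HMEHASH_SEARCH_SHME, as a hedged C sketch: a tag
 * match alone is not enough for a shared hmeblk; the region id left in
 * tmp1 by the tag xor must also be valid and set in this process'
 * shme region map:
 *
 *	rid = tag_word0(pa) ^ hmeblktag;	// low bits hold the rid
 *	if (rid < SFMMU_MAX_HME_REGIONS &&
 *	    (tsbarea->shmermap[rid >> BT_ULSHIFT] >>
 *	    (rid & BT_ULMASK)) & 1)
 *		goto found;
 *	// otherwise keep walking the chain
 */
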
2425#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2426#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2427#endif
2428
2429/*
2430 * HMEBLK_TO_HMENT is a macro that, given an hmeblk and a vaddr, returns
2431 * the offset for the corresponding hment.
2432 * Parameters:
2433 * In:
2434 *	vaddr = register with virtual address
2435 *	hmeblkpa = physical pointer to hme_blk
2436 * Out:
2437 *	hmentoff = register where hment offset will be stored
2438 *	hmemisc = hblk_misc
2439 * Scratch:
2440 *	tmp1
2441 */
2442#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
2443	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2444	lda	[hmentoff]ASI_MEM, hmemisc 				;\
2445	andcc	hmemisc, HBLK_SZMASK, %g0				;\
2446	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2447	  or	%g0, HMEBLK_HME1, hmentoff				;\
2448	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2449	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2450	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2451	add	tmp1, HMEBLK_HME1, hmentoff				;\
2452label1:
2453
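/*
 * HMEBLK_TO_HMENT in a hedged C sketch (illustrative names): large-page
 * hmeblks have a single hment, while an 8K hmeblk holds NHMENTS of them,
 * indexed by the low page-number bits of the vaddr:
 *
 *	if (hblk_misc & HBLK_SZMASK)		// sz != TTE8K
 *		hmentoff = HMEBLK_HME1;
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    ((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) *
 *		    SFHME_SIZE;
 */
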
2454/*
2455 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2456 *
2457 * tagacc	= (pseudo-)tag access register (in)
2458 * hatid	= sfmmu pointer for TSB miss (in)
2459 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2460 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2461 * hmeblkva	= VA of hment if found, otherwise clobbered (out)
2462 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2463 * hmemisc	= hblk_misc if TTE is found (out), otherwise clobbered
2464 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2465 *		  for this page size.
2466 * hashno	= constant/register hash number
2467 * label	= temporary label for branching within macro.
2468 * foundlabel	= label to jump to when tte is found.
2469 * suspendlabel= label to jump to when tte is suspended.
2470 * exitlabel	= label to jump to when tte is not found.
2471 *
2472 */
2473#define GET_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea, hmemisc, \
2474		hmeshift, hashno, label, foundlabel, suspendlabel, exitlabel) \
2475									;\
2476	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2477	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2478	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2479		hmeblkpa, label/**/5, hmemisc, hmeblkva)		;\
2480									;\
2481	/*								;\
2482	 * tagacc = tagacc						;\
2483	 * hatid = hatid						;\
2484	 * tsbarea = tsbarea						;\
2485	 * tte   = hmebp (hme bucket pointer)				;\
2486	 * hmeblkpa  = vapg  (virtual page)				;\
2487	 * hmemisc, hmeblkva = scratch					;\
2488	 */								;\
2489	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2490	or	hmemisc, SFMMU_INVALID_SHMERID, hmemisc			;\
2491									;\
2492	/*								;\
2493	 * tagacc = tagacc						;\
2494	 * hatid = hatid						;\
2495	 * tte   = hmebp						;\
2496	 * hmeblkpa  = CLOBBERED					;\
2497	 * hmemisc  = htag_bspage+hashno+invalid_rid			;\
2498	 * hmeblkva  = scratch						;\
2499	 */								;\
2500	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2501	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
2502	HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, hmeblkva, 	\
2503		tsbarea, tagacc, label/**/1)				;\
2504	/*								;\
2505	 * tagacc = CLOBBERED						;\
2506	 * tte = CLOBBERED						;\
2507	 * hmeblkpa = hmeblkpa						;\
2508	 * hmeblkva = hmeblkva						;\
2509	 */								;\
2510	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2511	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2512	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2513	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2514	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2515	  nop								;\
2516label/**/4:								;\
2517	/*								;\
2518	 * We have found the hmeblk containing the hment.		;\
2519	 * Now we calculate the corresponding tte.			;\
2520	 *								;\
2521	 * tagacc = tagacc						;\
2522	 * hatid = hatid						;\
2523	 * tte   = clobbered						;\
2524	 * hmeblkpa  = hmeblkpa						;\
2525	 * hmemisc  = hblktag						;\
2526	 * hmeblkva  = hmeblkva 					;\
2527	 */								;\
2528	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2529		label/**/2)						;\
2530									;\
2531	/*								;\
2532	 * tagacc = tagacc						;\
2533	 * hatid = hmentoff						;\
2534	 * tte   = clobbered						;\
2535	 * hmeblkpa  = hmeblkpa						;\
2536	 * hmemisc  = hblk_misc						;\
2537	 * hmeblkva  = hmeblkva 					;\
2538	 */								;\
2539									;\
2540	add	hatid, SFHME_TTE, hatid					;\
2541	add	hmeblkpa, hatid, hmeblkpa				;\
2542	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2543	add	hmeblkva, hatid, hmeblkva				;\
2544	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2545	HMELOCK_EXIT(hatid, hatid, ASI_MEM)	/* drop lock */		;\
2546	set	TTE_SUSPEND, hatid					;\
2547	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2548	btst	tte, hatid						;\
2549	bz,pt	%xcc, foundlabel					;\
2550	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2551									;\
2552	/*								;\
2553	 * Mapping is suspended, so goto suspend label.			;\
2554	 */								;\
2555	ba,pt	%xcc, suspendlabel					;\
2556	  nop
2557
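/*
 * GET_TTE as a whole, in a hedged C sketch (illustrative; the register
 * juggling through the tsbmiss scratch area is omitted):
 *
 *	hmebp = hmehash_func(tagacc, hatid, hmeshift);
 *	tag = make_hashtag(tagacc, hmeshift, hashno) | SFMMU_INVALID_SHMERID;
 *	hmelock_enter(hmebp);
 *	hblk = hmehash_search(hmebp, tag, hatid);
 *	if (hblk == NULL) {
 *		hmelock_exit(hmebp);
 *		goto exitlabel;
 *	}
 *	tte = *(uint64_t *)(hblk_pa + hmeblk_to_hment(tagacc, hblk) +
 *	    SFHME_TTE);
 *	hmelock_exit(hmebp);
 *	if (tte & TTE_SUSPEND)
 *		goto suspendlabel;
 *	goto foundlabel;
 */
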
2558/*
2559 * GET_SHME_TTE is similar to GET_TTE() except it searches
2560 * shared hmeblks via HMEHASH_SEARCH_SHME() macro.
2561 * If valid tte is found, hmemisc = shctx flag, i.e., shme is
2562 * either 0 (not part of scd) or 1 (part of scd).
2563 */
2564#define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea,	\
2565		hmemisc, hmeshift, hashno, label, foundlabel,		\
2566		suspendlabel, exitlabel)				\
2567									;\
2568	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2569	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2570	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2571		hmeblkpa, label/**/5, hmemisc, hmeblkva)		;\
2572									;\
2573	/*								;\
2574	 * tagacc = tagacc						;\
2575	 * hatid = hatid						;\
2576	 * tsbarea = tsbarea						;\
2577	 * tte   = hmebp (hme bucket pointer)				;\
2578	 * hmeblkpa  = vapg  (virtual page)				;\
2579	 * hmemisc, hmeblkva = scratch					;\
2580	 */								;\
2581	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2582									;\
2583	/*								;\
2584	 * tagacc = tagacc						;\
2585	 * hatid = hatid						;\
2586	 * tsbarea = tsbarea						;\
2587	 * tte   = hmebp						;\
2588	 * hmemisc  = htag_bspage + hashno + 0 (for rid)		;\
2589	 * hmeblkpa  = CLOBBERED					;\
2590	 * hmeblkva  = scratch						;\
2591	 */								;\
2592	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2593	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
2594									;\
2595	add     tte, HMEBUCK_NEXTPA, hmeblkpa				;\
2596	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2597	add     tte, HMEBUCK_HBLK, hmeblkva				;\
2598	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
2599	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte)			;\
2600									;\
2601label/**/8:								;\
2602	HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa, hmeblkva, 	\
2603		tsbarea, tagacc, tte, label/**/1)			;\
2604	/*								;\
2605	 * tagacc = CLOBBERED						;\
2606	 * tte = CLOBBERED						;\
2607	 * hmeblkpa = hmeblkpa						;\
2608	 * hmeblkva = hmeblkva						;\
2609	 */								;\
2610	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2611	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2612	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2613	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2614	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2615	  nop								;\
2616label/**/4:								;\
2617	/*								;\
2618	 * We have found the hmeblk containing the hment.		;\
2619	 * Now we calculate the corresponding tte.			;\
2620	 *								;\
2621	 * tagacc = tagacc						;\
2622	 * hatid = hatid						;\
2623	 * tte   = clobbered						;\
2624	 * hmeblkpa  = hmeblkpa						;\
2625	 * hmemisc  = hblktag						;\
2626	 * hmeblkva  = hmeblkva 					;\
2627	 * tsbarea = tsbmiss area					;\
2628	 */								;\
2629	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2630		label/**/2)						;\
2631									;\
2632	/*								;\
2633	 * tagacc = tagacc						;\
2634	 * hatid = hmentoff						;\
2635	 * tte = clobbered						;\
2636	 * hmeblkpa  = hmeblkpa						;\
2637	 * hmemisc  = hblk_misc						;\
2638	 * hmeblkva  = hmeblkva						;\
2639	 * tsbarea = tsbmiss area					;\
2640	 */								;\
2641									;\
2642	add	hatid, SFHME_TTE, hatid					;\
2643	add	hmeblkpa, hatid, hmeblkpa				;\
2644	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2645	brlz,pt tte, label/**/6						;\
2646	  add	hmeblkva, hatid, hmeblkva				;\
2647	btst	HBLK_SZMASK, hmemisc					;\
2648	bnz,a,pt %icc, label/**/7					;\
2649	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2650									;\
2651	/*								;\
2652	 * We found an invalid 8K tte in shme.  It may not belong	;\
2653	 * to shme's region since region size/alignment granularity	;\
2654	 * is 8K, but different regions don't share hmeblks.		;\
2655	 * Continue the search.						;\
2656	 */								;\
2657	sub	hmeblkpa, hatid, hmeblkpa				;\
2658	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2659	srlx	tagacc, hmeshift, tte					;\
2660	add	hmeblkpa, HMEBLK_NEXT, hmeblkva				;\
2661	ldxa	[hmeblkva]ASI_MEM, hmeblkva				;\
2662	add	hmeblkpa, HMEBLK_NEXTPA, hmeblkpa			;\
2663	ldxa	[hmeblkpa]ASI_MEM, hmeblkpa				;\
2664	MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc)		;\
2665	ba,a,pt	%xcc, label/**/8					;\
2666label/**/6:								;\
2667	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)		;\
2668	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2669label/**/7:								;\
2670	HMELOCK_EXIT(hatid, hatid, ASI_MEM)	/* drop lock */		;\
2671	set	TTE_SUSPEND, hatid					;\
2672	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2673	btst	tte, hatid						;\
2674	bz,pt	%xcc, foundlabel					;\
2675	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2676									;\
2677	/*								;\
2678	 * Mapping is suspended, so goto suspend label.			;\
2679	 */								;\
2680	ba,pt	%xcc, suspendlabel					;\
2681	  nop
2682
2683	/*
2684	 * KERNEL PROTECTION HANDLER
2685	 *
2686	 * g1 = tsb8k pointer register (clobbered)
2687	 * g2 = tag access register (ro)
2688	 * g3 - g7 = scratch registers
2689	 *
2690	 * Note: This function is patched at runtime for performance reasons.
2691	 * 	 Any changes here require matching changes in sfmmu_patch_ktsb.
2692	 */
2693	ENTRY_NP(sfmmu_kprot_trap)
2694	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2695sfmmu_kprot_patch_ktsb_base:
2696	RUNTIME_PATCH_SETX(%g1, %g6)
2697	/* %g1 = contents of ktsb_base or ktsb_pbase */
2698sfmmu_kprot_patch_ktsb_szcode:
2699	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2700
2701	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2702	! %g1 = First TSB entry pointer, as TSB miss handler expects
2703
2704	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2705sfmmu_kprot_patch_ktsb4m_base:
2706	RUNTIME_PATCH_SETX(%g3, %g6)
2707	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2708sfmmu_kprot_patch_ktsb4m_szcode:
2709	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2710
2711	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2712	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2713
2714        CPU_TSBMISS_AREA(%g6, %g7)
2715        HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2716	ba,pt	%xcc, sfmmu_tsb_miss_tt
2717	  nop
2718
2719	/*
2720	 * USER PROTECTION HANDLER
2721	 *
2722	 * g1 = tsb8k pointer register (ro)
2723	 * g2 = tag access register (ro)
2724	 * g3 = faulting context (clobbered, currently not used)
2725	 * g4 - g7 = scratch registers
2726	 */
2727	ALTENTRY(sfmmu_uprot_trap)
2728#ifdef sun4v
2729	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2730	/* %g1 = first TSB entry ptr now, %g2 preserved */
2731
2732	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2733	brlz,pt %g3, 9f				/* check for 2nd TSB */
2734	  nop
2735
2736	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2737	/* %g3 = second TSB entry ptr now, %g2 preserved */
2738
2739#else /* sun4v */
2740#ifdef UTSB_PHYS
2741	/* g1 = first TSB entry ptr */
2742	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2743	brlz,pt %g3, 9f			/* check for 2nd TSB */
2744	  nop
2745
2746	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2747	/* %g3 = second TSB entry ptr now, %g2 preserved */
2748#else /* UTSB_PHYS */
2749	brgez,pt %g1, 9f		/* check for 2nd TSB */
2750	  mov	-1, %g3			/* set second tsbe ptr to -1 */
2751
2752	mov	%g2, %g7
2753	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2754	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2755	mov	%g1, %g7
2756	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2757#endif /* UTSB_PHYS */
2758#endif /* sun4v */
27599:
2760	CPU_TSBMISS_AREA(%g6, %g7)
2761	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2762	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2763	  nop
2764
2765	/*
2766	 * Kernel 8K page iTLB miss.  We also get here if we took a
2767	 * fast instruction access mmu miss trap while running in
2768	 * invalid context.
2769	 *
2770	 * %g1 = 8K TSB pointer register (not used, clobbered)
2771	 * %g2 = tag access register (used)
2772	 * %g3 = faulting context id (used)
2773	 * %g7 = TSB tag to match (used)
2774	 */
2775	.align	64
2776	ALTENTRY(sfmmu_kitlb_miss)
2777	brnz,pn %g3, tsb_tl0_noctxt
2778	  nop
2779
2780	/* kernel miss */
2781	/* get kernel tsb pointer */
2782	/* we patch the next set of instructions at run time */
2783	/* NOTE: any changes here require matching changes in sfmmu_patch_ktsb */
2784iktsbbase:
2785	RUNTIME_PATCH_SETX(%g4, %g5)
2786	/* %g4 = contents of ktsb_base or ktsb_pbase */
2787
2788iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2789	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2790	or	%g4, %g1, %g1			! form tsb ptr
2791	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2792	cmp	%g4, %g7
2793	bne,pn	%xcc, iktsb4mbase		! check 4m ktsb
2794	  srlx    %g2, MMU_PAGESHIFT4M, %g3	! use 4m virt-page as TSB index
2795
2796	andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2797	bz,pn	%icc, exec_fault
2798	  nop
2799	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2800	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2801	retry
2802
2803iktsb4mbase:
2804        RUNTIME_PATCH_SETX(%g4, %g6)
2805        /* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
2806iktsb4m:
2807	sllx    %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2808        srlx    %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2809	add	%g4, %g3, %g3			! %g3 = 4m tsbe ptr
2810	ldda	[%g3]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2811	cmp	%g4, %g7
2812	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2813	  andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2814	bz,pn	%icc, exec_fault
2815	  nop
2816	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2817	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2818	retry
2819
2820	/*
2821	 * Kernel dTLB miss.  We also get here if we took a fast data
2822	 * access mmu miss trap while running in invalid context.
2823	 *
2824	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2825	 *	We select the TSB miss handler to branch to depending on
2826	 *	the virtual address of the access.  In the future it may
2827	 *	be desirable to separate kpm TTEs into their own TSB,
2828	 *	in which case all that needs to be done is to set
2829	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2830	 *	early in the miss if we detect a kpm VA to a new handler.
2831	 *
2832	 * %g1 = 8K TSB pointer register (not used, clobbered)
2833	 * %g2 = tag access register (used)
2834	 * %g3 = faulting context id (used)
2835	 */
2836	.align	64
2837	ALTENTRY(sfmmu_kdtlb_miss)
2838	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2839	  nop
2840
2841	/* Gather some stats for kpm misses in the TLB. */
2842	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2843	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2844
2845	/*
2846	 * Get first TSB offset and look for 8K/64K/512K mapping
2847	 * using the 8K virtual page as the index.
2848	 *
2849	 * We patch the next set of instructions at run time;
2850	 * any changes here require sfmmu_patch_ktsb changes too.
2851	 */
2852dktsbbase:
2853	RUNTIME_PATCH_SETX(%g7, %g6)
2854	/* %g7 = contents of ktsb_base or ktsb_pbase */
2855
2856dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2857	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2858
2859	/*
2860	 * At this point %g1 is our index into the TSB.
2861	 * We just masked off enough bits of the VA depending
2862	 * on our TSB size code.
2863	 */
2864	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2865	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2866	cmp	%g6, %g4			! compare tag
2867	bne,pn	%xcc, dktsb4m_kpmcheck_small
2868	  add	%g7, %g1, %g1			/* form tsb ptr */
2869	TT_TRACE(trace_tsbhit)
2870	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2871	/* trapstat expects tte in %g5 */
2872	retry
2873
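	/*
	 * The sllx/srlx pair at dktsb implements the usual direct-mapped
	 * TSB index; a hedged C sketch (RUNTIME_PATCH stands for the TSB
	 * size code patched in at boot, and TAGACC_SHIFT == MMU_PAGESHIFT):
	 *
	 *	nentries = 1 << (TSB_START_SIZE + ktsb_szcode);
	 *	idx = ((tagacc >> MMU_PAGESHIFT) & (nentries - 1))
	 *	    << TSB_ENTRY_SHIFT;
	 *	tsbe = ktsb_base + idx;
	 */
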
2874	/*
2875	 * If kpm is using large pages, the following instruction needs
2876	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2877	 * so that we will probe the 4M TSB regardless of the VA.  In
2878	 * the case kpm is using small pages, we know no large kernel
2879	 * mappings are located above 0x80000000.00000000 so we skip the
2880	 * probe as an optimization.
2881	 */
2882dktsb4m_kpmcheck_small:
2883	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2884	  /* delay slot safe, below */
2885
2886	/*
2887	 * Get second TSB offset and look for 4M mapping
2888	 * using 4M virtual page as the TSB index.
2889	 *
2890	 * Here:
2891	 * %g1 = 8K TSB pointer.  Don't squash it.
2892	 * %g2 = tag access register (we still need it)
2893	 */
2894	srlx	%g2, MMU_PAGESHIFT4M, %g3
2895
2896	/*
2897	 * We patch the next set of instructions at run time;
2898	 * any changes here require sfmmu_patch_ktsb changes too.
2899	 */
2900dktsb4mbase:
2901	RUNTIME_PATCH_SETX(%g7, %g6)
2902	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2903dktsb4m:
2904	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2905	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2906
2907	/*
2908	 * At this point %g3 is our index into the TSB.
2909	 * We just masked off enough bits of the VA depending
2910	 * on our TSB size code.
2911	 */
2912	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2913	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2914	cmp	%g6, %g4			! compare tag
2915
2916dktsb4m_tsbmiss:
2917	bne,pn	%xcc, dktsb4m_kpmcheck
2918	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2919	TT_TRACE(trace_tsbhit)
2920	/* we don't check TTE size here since we assume 4M TSB is separate */
2921	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2922	/* trapstat expects tte in %g5 */
2923	retry
2924
2925	/*
2926	 * So, we failed to find a valid TTE to match the faulting
2927	 * address in either TSB.  There are a few cases that could land
2928	 * us here:
2929	 *
2930	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2931	 *    to sfmmu_tsb_miss_tt to handle the miss.
2932	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2933	 *    4M TSB.  Let segkpm handle it.
2934	 *
2935	 * Note that we shouldn't land here in the case of a kpm VA when
2936	 * kpm_smallpages is active -- we handled that case earlier at
2937	 * dktsb4m_kpmcheck_small.
2938	 *
2939	 * At this point:
2940	 *  g1 = 8K-indexed primary TSB pointer
2941	 *  g2 = tag access register
2942	 *  g3 = 4M-indexed secondary TSB pointer
2943	 */
2944dktsb4m_kpmcheck:
2945	cmp	%g2, %g0
2946	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2947	  nop
2948	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2949	  nop
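
	/*
	 * The dispatch above, as a hedged C sketch: kpm VAs live in the
	 * upper half of the address space, so the sign bit decides:
	 *
	 *	if ((int64_t)tagacc < 0)	// VA >= 0x8000000000000000
	 *		sfmmu_kpm_dtsb_miss();	// let segkpm handle it
	 *	else
	 *		sfmmu_tsb_miss_tt();	// common tsbmiss path
	 */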
2950
2951#ifdef sun4v
2952	/*
2953	 * User instruction miss w/ single TSB.
2954	 * The first probe covers 8K, 64K, and 512K page sizes,
2955	 * because 64K and 512K mappings are replicated off 8K
2956	 * pointer.
2957	 *
2958	 * g1 = tsb8k pointer register
2959	 * g2 = tag access register
2960	 * g3 - g6 = scratch registers
2961	 * g7 = TSB tag to match
2962	 */
2963	.align	64
2964	ALTENTRY(sfmmu_uitlb_fastpath)
2965
2966	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2967	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2968	ba,pn	%xcc, sfmmu_tsb_miss_tt
2969	  mov	-1, %g3
2970
2971	/*
2972	 * User data miss w/ single TSB.
2973	 * The first probe covers 8K, 64K, and 512K page sizes,
2974	 * because 64K and 512K mappings are replicated off 8K
2975	 * pointer.
2976	 *
2977	 * g1 = tsb8k pointer register
2978	 * g2 = tag access register
2979	 * g3 - g6 = scratch registers
2980	 * g7 = TSB tag to match
2981	 */
2982	.align 64
2983	ALTENTRY(sfmmu_udtlb_fastpath)
2984
2985	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2986	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2987	ba,pn	%xcc, sfmmu_tsb_miss_tt
2988	  mov	-1, %g3
2989
2990	/*
2991	 * User instruction miss w/ multiple TSBs (sun4v).
2992	 * The first probe covers 8K, 64K, and 512K page sizes,
2993	 * because 64K and 512K mappings are replicated off 8K
2994	 * pointer.  Second probe covers 4M page size only.
2995	 *
2996	 * Just like sfmmu_udtlb_slowpath, except:
2997	 *   o Uses ASI_ITLB_IN
2998	 *   o checks for execute permission
2999	 *   o No ISM prediction.
3000	 *
3001	 * g1 = tsb8k pointer register
3002	 * g2 = tag access register
3003	 * g3 - g6 = scratch registers
3004	 * g7 = TSB tag to match
3005	 */
3006	.align	64
3007	ALTENTRY(sfmmu_uitlb_slowpath)
3008
3009	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
3010	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
3011	/* g4 - g5 = clobbered here */
3012
3013	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3014	/* g1 = first TSB pointer, g3 = second TSB pointer */
3015	srlx	%g2, TAG_VALO_SHIFT, %g7
3016	PROBE_2ND_ITSB(%g3, %g7)
3017	/* NOT REACHED */
3018
3019#else /* sun4v */
3020
3021	/*
3022	 * User instruction miss w/ multiple TSBs (sun4u).
3023	 * The first probe covers 8K, 64K, and 512K page sizes,
3024	 * because 64K and 512K mappings are replicated off 8K
3025	 * pointer.  Probe of 1st TSB has already been done prior to entry
3026	 * into this routine. For the UTSB_PHYS case we probe up to 3
3027	 * valid other TSBs in the following order:
3028	 * 1) shared TSB for 4M-256M pages
3029	 * 2) private TSB for 4M-256M pages
3030	 * 3) shared TSB for 8K-512K pages
3031	 *
3032	 * For the non UTSB_PHYS case we probe the 2nd TSB here that backs
3033	 * 4M-256M pages.
3034	 *
3035	 * Just like sfmmu_udtlb_slowpath, except:
3036	 *   o Uses ASI_ITLB_IN
3037	 *   o checks for execute permission
3038	 *   o No ISM prediction.
3039	 *
3040	 * g1 = tsb8k pointer register
3041	 * g2 = tag access register
3042	 * g4 - g6 = scratch registers
3043	 * g7 = TSB tag to match
3044	 */
3045	.align	64
3046	ALTENTRY(sfmmu_uitlb_slowpath)
3047
3048#ifdef UTSB_PHYS
3049
3050       GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
3051        brlz,pt %g6, 1f
3052          nop
3053        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
3054        PROBE_4TH_ITSB(%g6, %g7, uitlb_4m_scd_probefail)
30551:
3056        GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
3057        brlz,pt %g3, 2f
3058          nop
3059        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3060        PROBE_2ND_ITSB(%g3, %g7, uitlb_4m_probefail)
30612:
3062        GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
3063        brlz,pt %g6, sfmmu_tsb_miss_tt
3064          nop
3065        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
3066        PROBE_3RD_ITSB(%g6, %g7, uitlb_8K_scd_probefail)
3067        ba,pn   %xcc, sfmmu_tsb_miss_tt
3068          nop
3069
3070#else /* UTSB_PHYS */
3071	mov	%g1, %g3	/* save tsb8k reg in %g3 */
3072	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
3073	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
3074	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
3075	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
3076	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
3077       /* g1 = first TSB pointer, g3 = second TSB pointer */
3078        srlx    %g2, TAG_VALO_SHIFT, %g7
3079        PROBE_2ND_ITSB(%g3, %g7, isynth)
3080	ba,pn	%xcc, sfmmu_tsb_miss_tt
3081	  nop
3082
3083#endif /* UTSB_PHYS */
3084#endif /* sun4v */
3085
3086#if defined(sun4u) && defined(UTSB_PHYS)
3087
3088        /*
3089	 * We come here for the ism predict DTLB_MISS case or if the
3090	 * probe in the first TSB failed.
3091         */
3092
3093        .align 64
3094        ALTENTRY(sfmmu_udtlb_slowpath_noismpred)
3095
3096	/*
3097         * g1 = tsb8k pointer register
3098         * g2 = tag access register
3099         * g4 - %g6 = scratch registers
3100         * g7 = TSB tag to match
3101	 */
3102
3103	/*
3104	 * ISM non-predict probe order
3105         * probe 1ST_TSB (8K index)
3106         * probe 2ND_TSB (4M index)
3107         * probe 4TH_TSB (4M index)
3108         * probe 3RD_TSB (8K index)
3109	 *
3110	 * We already probed first TSB in DTLB_MISS handler.
3111	 */
3112
3113        /*
3114         * Private 2ND TSB 4M-256M pages
3115         */
3116	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
3117	brlz,pt %g3, 1f
3118	  nop
3119        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3120        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
3121
3122	/*
3123	 * Shared Context 4TH TSB 4M-256M pages
3124	 */
31251:
3126	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
3127	brlz,pt %g6, 2f
3128	  nop
3129        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
3130        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail)
3131
3132        /*
3133         * Shared Context 3RD TSB 8K-512K pages
3134         */
31352:
3136	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
3137	brlz,pt %g6, sfmmu_tsb_miss_tt
3138	  nop
3139        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
3140        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail)
3141	ba,pn	%xcc, sfmmu_tsb_miss_tt
3142	  nop
3143
3144	.align 64
3145        ALTENTRY(sfmmu_udtlb_slowpath_ismpred)
3146
3147	/*
3148         * g1 = tsb8k pointer register
3149         * g2 = tag access register
3150         * g4 - g6 = scratch registers
3151         * g7 = TSB tag to match
3152	 */
3153
3154	/*
3155	 * ISM predict probe order
3156	 * probe 4TH_TSB (4M index)
3157	 * probe 2ND_TSB (4M index)
3158	 * probe 1ST_TSB (8K index)
3159	 * probe 3RD_TSB (8K index)
3160	 */
3161	/*
3162	 * Shared Context 4TH TSB 4M-256M pages
3163	 */
3164	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
3165	brlz,pt %g6, 4f
3166	  nop
3167        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
3168        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail2)
3169
3170        /*
3171         * Private 2ND TSB 4M-256M pages
3172         */
31734:
3174	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
3175	brlz,pt %g3, 5f
3176	  nop
3177        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3178        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail2)
3179
31805:
3181        PROBE_1ST_DTSB(%g1, %g7, udtlb_8k_first_probefail2)
3182
3183        /*
3184         * Shared Context 3RD TSB 8K-512K pages
3185         */
3186	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
3187	brlz,pt %g6, 6f
3188	  nop
3189        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
3190        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail2)
31916:
3192	ba,pn	%xcc, sfmmu_tsb_miss_tt /* ISM Predict and ISM non-predict path */
3193	  nop
3194
3195#else /* sun4u && UTSB_PHYS */
3196
3197       .align 64
3198        ALTENTRY(sfmmu_udtlb_slowpath)
3199
3200	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
3201	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
3202	  mov	%g1, %g3
3203
3204udtlb_miss_probefirst:
3205	/*
3206	 * g1 = 8K TSB pointer register
3207	 * g2 = tag access register
3208	 * g3 = (potentially) second TSB entry ptr
3209	 * g6 = ism pred.
3210	 * g7 = vpg_4m
3211	 */
3212#ifdef sun4v
3213	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
3214	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
3215
3216	/*
3217	 * Here:
3218	 *   g1 = first TSB pointer
3219	 *   g2 = tag access reg
3220	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
3221	 */
3222	brgz,pn	%g6, sfmmu_tsb_miss_tt
3223	  nop
3224#else /* sun4v */
3225	mov	%g1, %g4
3226	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
3227	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
3228
3229	/*
3230	 * Here:
3231	 *   g1 = first TSB pointer
3232	 *   g2 = tag access reg
3233	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
3234	 */
3235	brgz,pn	%g6, sfmmu_tsb_miss_tt
3236	  nop
3237	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
3238	/* fall through in 8K->4M probe order */
3239#endif /* sun4v */
3240
3241udtlb_miss_probesecond:
3242	/*
3243	 * Look in the second TSB for the TTE
3244	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
3245	 * g2 = tag access reg
3246	 * g3 = 8K TSB pointer register
3247	 * g6 = ism pred.
3248	 * g7 = vpg_4m
3249	 */
3250#ifdef sun4v
3251	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
3252	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3253	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
3254#else /* sun4v */
3255	mov	%g3, %g7
3256	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
3257	/* %g2 clobbered, %g3 =second tsbe ptr */
3258	mov	MMU_TAG_ACCESS, %g2
3259	ldxa	[%g2]ASI_DMMU, %g2
3260#endif /* sun4v */
3261
3262	srlx	%g2, TAG_VALO_SHIFT, %g7
3263	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
3264	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
3265	brgz,pn	%g6, udtlb_miss_probefirst
3266	  nop
3267
3268	/* fall through to sfmmu_tsb_miss_tt */
3269#endif /* sun4u && UTSB_PHYS */
3270
3271
3272	ALTENTRY(sfmmu_tsb_miss_tt)
3273	TT_TRACE(trace_tsbmiss)
3274	/*
3275	 * We get here if there is a TSB miss OR a write protect trap.
3276	 *
3277	 * g1 = First TSB entry pointer
3278	 * g2 = tag access register
3279	 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
3280	 * g4 - g7 = scratch registers
3281	 */
3282
3283	ALTENTRY(sfmmu_tsb_miss)
3284
3285	/*
3286	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3287	 * point to trapstat's TSB miss return code (note that trapstat
3288	 * itself will patch the correct offset to add).
3289	 */
3290	rdpr	%tl, %g7
3291	cmp	%g7, 1
3292	ble,pt	%xcc, 0f
3293	  sethi	%hi(KERNELBASE), %g6
3294	rdpr	%tpc, %g7
3295	or	%g6, %lo(KERNELBASE), %g6
3296	cmp	%g7, %g6
3297	bgeu,pt	%xcc, 0f
3298	/* delay slot safe */
3299
3300	ALTENTRY(tsbmiss_trapstat_patch_point)
3301	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3302	wrpr	%g7, %tpc
3303	add	%g7, 4, %g7
3304	wrpr	%g7, %tnpc
33050:
3306	CPU_TSBMISS_AREA(%g6, %g7)
3307	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save 1ST tsb pointer */
3308	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save 2ND tsb pointer */
3309
3310	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
3311	brz,a,pn %g3, 1f			/* skip ahead if kernel */
3312	  ldn	[%g6 + TSBMISS_KHATID], %g7
3313	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
3314	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
3315
3316	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
3317
3318	cmp	%g3, INVALID_CONTEXT
3319	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
3320	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
3321
3322#if defined(sun4v) || defined(UTSB_PHYS)
3323        ldub    [%g6 + TSBMISS_URTTEFLAGS], %g7	/* clear ctx1 flag set from */
3324        andn    %g7, HAT_CHKCTX1_FLAG, %g7	/* the previous tsb miss    */
3325        stub    %g7, [%g6 + TSBMISS_URTTEFLAGS]
3326#endif /* sun4v || UTSB_PHYS */
3327
3328	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
3329	/*
3330	 * The miss wasn't in an ISM segment.
3331	 *
3332	 * %g1, %g3, %g4, %g5, %g7 all clobbered
3333	 * %g2 = (pseudo) tag access
3334	 */
3335
3336	ba,pt	%icc, 2f
3337	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
3338
33391:
3340	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
3341	/*
3342	 * 8K and 64K hash.
3343	 */
33442:
3345
3346	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3347		MMU_PAGESHIFT64K, TTE64K, tsb_l8K, tsb_checktte,
3348		sfmmu_suspend_tl, tsb_512K)
3349	/* NOT REACHED */
3350
3351tsb_512K:
3352	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3353	brz,pn	%g5, 3f
3354	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3355	and	%g4, HAT_512K_FLAG, %g5
3356
3357	/*
3358	 * Note that there is a small window here where we may have
3359	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
3360	 * flag yet, so we will skip searching the 512k hash list.
3361	 * In this case we will end up in pagefault which will find
3362	 * the mapping and return.  So, in this instance we will end up
3363	 * spending a bit more time resolving this TSB miss, but it can
3364	 * only happen once per process and even then, the chances of that
3365	 * are very small, so it's not worth the extra overhead it would
3366	 * take to close this window.
3367	 */
3368	brz,pn	%g5, tsb_4M
3369	  nop
33703:
3371	/*
3372	 * 512K hash
3373	 */
3374
3375	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3376		MMU_PAGESHIFT512K, TTE512K, tsb_l512K, tsb_checktte,
3377		sfmmu_suspend_tl, tsb_4M)
3378	/* NOT REACHED */
3379
3380tsb_4M:
3381	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3382	brz,pn	%g5, 4f
3383	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3384	and	%g4, HAT_4M_FLAG, %g5
3385	brz,pn	%g5, tsb_32M
3386	  nop
33874:
3388	/*
3389	 * 4M hash
3390	 */
3391
3392	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3393		MMU_PAGESHIFT4M, TTE4M, tsb_l4M, tsb_checktte,
3394		sfmmu_suspend_tl, tsb_32M)
3395	/* NOT REACHED */
3396
3397tsb_32M:
3398	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3399#ifdef	sun4v
3400        brz,pn	%g5, 6f
3401#else
3402	brz,pn  %g5, tsb_pagefault
3403#endif
3404	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3405	and	%g4, HAT_32M_FLAG, %g5
3406	brz,pn	%g5, tsb_256M
3407	  nop
34085:
3409	/*
3410	 * 32M hash
3411	 */
3412
3413	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3414		MMU_PAGESHIFT32M, TTE32M, tsb_l32M, tsb_checktte,
3415		sfmmu_suspend_tl, tsb_256M)
3416	/* NOT REACHED */
3417
3418#if defined(sun4u) && !defined(UTSB_PHYS)
3419#define tsb_shme        tsb_pagefault
3420#endif
3421tsb_256M:
3422	ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3423	and	%g4, HAT_256M_FLAG, %g5
3424	brz,pn	%g5, tsb_shme
3425	  nop
34266:
3427	/*
3428	 * 256M hash
3429	 */
3430
3431	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3432	    MMU_PAGESHIFT256M, TTE256M, tsb_l256M, tsb_checktte,
3433	    sfmmu_suspend_tl, tsb_shme)
3434	/* NOT REACHED */
3435
3436tsb_checktte:
3437	/*
3438	 * g1 = hblk_misc
3439	 * g2 = tagacc
3440	 * g3 = tte
3441	 * g4 = tte pa
3442	 * g5 = tte va
3443	 * g6 = tsbmiss area
3444	 * g7 = hatid
3445	 */
3446	brlz,a,pt %g3, tsb_validtte
3447	  rdpr	%tt, %g7
3448
3449#if defined(sun4u) && !defined(UTSB_PHYS)
3450#undef tsb_shme
3451	ba      tsb_pagefault
3452	  nop
3453#else /* sun4u && !UTSB_PHYS */
3454
3455tsb_shme:
3456	/*
3457	 * g2 = tagacc
3458	 * g6 = tsbmiss area
3459	 */
3460	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3461	brz,pn	%g5, tsb_pagefault
3462	  nop
3463	ldx	[%g6 + TSBMISS_SHARED_UHATID], %g7	/* g7 = srdp */
3464	brz,pn	%g7, tsb_pagefault
3465	  nop
3466
3467	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3468		MMU_PAGESHIFT64K, TTE64K, tsb_shme_l8K, tsb_shme_checktte,
3469		sfmmu_suspend_tl, tsb_shme_512K)
3470	/* NOT REACHED */
3471
3472tsb_shme_512K:
3473	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3474	and	%g4, HAT_512K_FLAG, %g5
3475	brz,pn	%g5, tsb_shme_4M
3476	  nop
3477
3478	/*
3479	 * 512K hash
3480	 */
3481
3482	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3483		MMU_PAGESHIFT512K, TTE512K, tsb_shme_l512K, tsb_shme_checktte,
3484		sfmmu_suspend_tl, tsb_shme_4M)
3485	/* NOT REACHED */
3486
3487tsb_shme_4M:
3488	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3489	and	%g4, HAT_4M_FLAG, %g5
3490	brz,pn	%g5, tsb_shme_32M
3491	  nop
34924:
3493	/*
3494	 * 4M hash
3495	 */
3496	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3497		MMU_PAGESHIFT4M, TTE4M, tsb_shme_l4M, tsb_shme_checktte,
3498		sfmmu_suspend_tl, tsb_shme_32M)
3499	/* NOT REACHED */
3500
3501tsb_shme_32M:
3502	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3503	and	%g4, HAT_32M_FLAG, %g5
3504	brz,pn	%g5, tsb_shme_256M
3505	  nop
3506
3507	/*
3508	 * 32M hash
3509	 */
3510
3511	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3512		MMU_PAGESHIFT32M, TTE32M, tsb_shme_l32M, tsb_shme_checktte,
3513		sfmmu_suspend_tl, tsb_shme_256M)
3514	/* NOT REACHED */
3515
3516tsb_shme_256M:
3517	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3518	and	%g4, HAT_256M_FLAG, %g5
3519	brz,pn	%g5, tsb_pagefault
3520	  nop
3521
3522	/*
3523	 * 256M hash
3524	 */
3525
3526	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3527	    MMU_PAGESHIFT256M, TTE256M, tsb_shme_l256M, tsb_shme_checktte,
3528	    sfmmu_suspend_tl, tsb_pagefault)
3529	/* NOT REACHED */
3530
3531tsb_shme_checktte:
3532
3533	brgez,pn %g3, tsb_pagefault
3534	  rdpr	%tt, %g7
3535	/*
3536	 * g1 = ctx1 flag
3537	 * g3 = tte
3538	 * g4 = tte pa
3539	 * g5 = tte va
3540	 * g6 = tsbmiss area
3541	 * g7 = tt
3542	 */
3543
3544	brz,pt  %g1, tsb_validtte
3545	  nop
3546	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
3547	or	%g1, HAT_CHKCTX1_FLAG, %g1
3548	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
3549
3550	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
3551#endif /* sun4u && !UTSB_PHYS */
3552
3553tsb_validtte:
3554	/*
3555	 * g3 = tte
3556	 * g4 = tte pa
3557	 * g5 = tte va
3558	 * g6 = tsbmiss area
3559	 * g7 = tt
3560	 */
3561
3562	/*
3563	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
3564	 */
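	/*
	 * In outline (a C sketch; the TTE_SET_*_ML macros below do the
	 * real atomic updates on the tte in memory):
	 *
	 *	if (tt == FAST_PROT_TT) {	// protection trap
	 *		tte_set_refmod(ttep);	// set ref and mod bits
	 *		goto tsb_update_tl1;
	 *	}
	 *	// otherwise: maybe enable exec (4: below), set ref only
	 */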
3565	cmp	%g7, FAST_PROT_TT
3566	bne,pt	%icc, 4f
3567	  nop
3568
3569	TTE_SET_REFMOD_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_refmod,
3570	    tsb_protfault)
3571
3572	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3573#ifdef sun4v
3574	MMU_FAULT_STATUS_AREA(%g7)
3575	ldx	[%g7 + MMFSA_D_ADDR], %g5	/* load fault addr for later */
3576#else /* sun4v */
3577	mov     MMU_TAG_ACCESS, %g5
3578	ldxa    [%g5]ASI_DMMU, %g5
3579#endif /* sun4v */
3580	ba,pt	%xcc, tsb_update_tl1
3581	  nop
35824:
3583	/*
3584	 * ITLB translation was found but execute permission is
3585	 * disabled. If we have software execute permission (soft exec
3586	 * bit is set), then enable hardware execute permission.
3587	 * Otherwise continue with a protection violation.
3588	 */
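	/*
	 * Equivalent C sketch (names illustrative, not the kernel's):
	 *
	 *	if (itlb_trap && !(tte & TTE_EXECPRM_INT)) {
	 *		if (tte_soft_exec(tte))		// soft exec set?
	 *			tte_set_exec(ttep);	// promote to hw
	 *		else
	 *			goto tsb_protfault;	// real violation
	 *	}
	 */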
3589	cmp     %g7, T_INSTR_MMU_MISS
3590	be,pn	%icc, 5f
3591	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
3592	cmp     %g7, FAST_IMMU_MISS_TT
3593	bne,pt %icc, 3f
3594	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
35955:
3596	bnz,pn %icc, 3f
3597	  TTE_CHK_SOFTEXEC_ML(%g3)		/* check soft execute */
3598	bz,pn %icc, tsb_protfault
3599	  nop
3600	TTE_SET_EXEC_ML(%g3, %g4, %g7, tsb_lset_exec)
36013:
3602	/*
3603	 * Set reference bit if not already set
3604	 */
3605	TTE_SET_REF_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_ref)
3606
3607	/*
3608	 * Now, load into TSB/TLB.  At this point:
3609	 * g3 = tte
3610	 * g4 = patte
3611	 * g6 = tsbmiss area
3612	 */
3613	rdpr	%tt, %g7
3614#ifdef sun4v
3615	MMU_FAULT_STATUS_AREA(%g2)
3616	cmp	%g7, T_INSTR_MMU_MISS
3617	be,a,pt	%icc, 9f
3618	  nop
3619	cmp	%g7, FAST_IMMU_MISS_TT
3620	be,a,pt	%icc, 9f
3621	  nop
3622	add	%g2, MMFSA_D_, %g2
36239:
3624	ldx	[%g2 + MMFSA_CTX_], %g7
3625	sllx	%g7, TTARGET_CTX_SHIFT, %g7
3626	ldx	[%g2 + MMFSA_ADDR_], %g2
3627	mov	%g2, %g5		! load the fault addr for later use
3628	srlx	%g2, TTARGET_VA_SHIFT, %g2
3629	or	%g2, %g7, %g2
3630#else /* sun4v */
3631	mov     MMU_TAG_ACCESS, %g5
3632	cmp     %g7, FAST_IMMU_MISS_TT
3633	be,a,pt %icc, 9f
3634	   ldxa  [%g0]ASI_IMMU, %g2
3635	ldxa    [%g0]ASI_DMMU, %g2
3636	ba,pt   %icc, tsb_update_tl1
3637	   ldxa  [%g5]ASI_DMMU, %g5
36389:
3639	ldxa    [%g5]ASI_IMMU, %g5
3640#endif /* sun4v */
3641
3642tsb_update_tl1:
3643	TTE_CLR_SOFTEXEC_ML(%g3)
3644	srlx	%g2, TTARGET_CTX_SHIFT, %g7
3645	brz,pn	%g7, tsb_kernel
3646#ifdef sun4v
3647	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
3648#else  /* sun4v */
3649	  srlx	%g3, TTE_SZ_SHFT, %g7
3650#endif /* sun4v */
3651
3652tsb_user:
3653#ifdef sun4v
3654	cmp	%g7, TTE4M
3655	bge,pn	%icc, tsb_user4m
3656	  nop
3657#else /* sun4v */
3658	cmp	%g7, TTESZ_VALID | TTE4M
3659	be,pn	%icc, tsb_user4m
3660	  srlx	%g3, TTE_SZ2_SHFT, %g7
3661	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
3662#ifdef ITLB_32M_256M_SUPPORT
3663	bnz,pn	%icc, tsb_user4m
3664	  nop
3665#else /* ITLB_32M_256M_SUPPORT */
3666	bnz,a,pn %icc, tsb_user_pn_synth
3667	 nop
3668#endif /* ITLB_32M_256M_SUPPORT */
3669#endif /* sun4v */
3670
3671tsb_user8k:
3672#if defined(sun4v) || defined(UTSB_PHYS)
3673	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3674	and	%g7, HAT_CHKCTX1_FLAG, %g1
3675	brz,a,pn %g1, 1f
3676	  ldn	[%g6 + TSBMISS_TSBPTR], %g1		! g1 = 1ST TSB ptr
3677	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
3678	brlz,a,pn %g1, ptl1_panic			! if no shared 3RD tsb
3679	  mov PTL1_NO_SCDTSB8K, %g1			! panic
3680        GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
36811:
3682#else /* defined(sun4v) || defined(UTSB_PHYS) */
3683	ldn   [%g6 + TSBMISS_TSBPTR], %g1             ! g1 = 1ST TSB ptr
3684#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3685
3686#ifndef UTSB_PHYS
3687	mov	ASI_N, %g7	! user TSBs accessed by VA
3688	mov	%g7, %asi
3689#endif /* !UTSB_PHYS */
3690
3691	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 5)
3692
3693	rdpr    %tt, %g5
3694#ifdef sun4v
3695	cmp	%g5, T_INSTR_MMU_MISS
3696	be,a,pn	%xcc, 9f
3697	  mov	%g3, %g5
3698#endif /* sun4v */
3699	cmp	%g5, FAST_IMMU_MISS_TT
3700	be,pn	%xcc, 9f
3701	  mov	%g3, %g5
3702
3703	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3704	! trapstat wants TTE in %g5
3705	retry
37069:
3707	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3708	! trapstat wants TTE in %g5
3709	retry
3710
3711tsb_user4m:
3712#if defined(sun4v) || defined(UTSB_PHYS)
3713	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3714	and	%g7, HAT_CHKCTX1_FLAG, %g1
3715	brz,a,pn %g1, 4f
3716	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = 2ND TSB ptr
3717	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)! g1 = 4TH TSB ptr
3718	brlz,a,pn %g1, 5f				! if no shared 4TH TSB
3719	  nop
3720        GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
3721
3722#else /* defined(sun4v) || defined(UTSB_PHYS) */
3723	ldn   [%g6 + TSBMISS_TSBPTR4M], %g1             ! g1 = 2ND TSB ptr
3724#endif /* defined(sun4v) || defined(UTSB_PHYS) */
37254:
3726	brlz,pn %g1, 5f	/* Check to see if we have 2nd TSB programmed */
3727	  nop
3728
3729#ifndef UTSB_PHYS
3730	mov	ASI_N, %g7	! user TSBs accessed by VA
3731	mov	%g7, %asi
3732#endif /* UTSB_PHYS */
3733
3734        TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 6)
3735
37365:
3737	rdpr    %tt, %g5
3738#ifdef sun4v
3739        cmp     %g5, T_INSTR_MMU_MISS
3740        be,a,pn %xcc, 9f
3741          mov   %g3, %g5
3742#endif /* sun4v */
3743        cmp     %g5, FAST_IMMU_MISS_TT
3744        be,pn   %xcc, 9f
3745        mov     %g3, %g5
3746
3747        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3748        ! trapstat wants TTE in %g5
3749        retry
37509:
3751        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3752        ! trapstat wants TTE in %g5
3753        retry
3754
3755#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3756	/*
3757	 * Panther ITLB synthesis.
3758	 * The Panther 32M and 256M ITLB code simulates these two large page
3759	 * sizes with 4M pages to support programs (for example, Java) that
3760	 * may copy instructions into a 32M or 256M data page and then
3761	 * execute them. The code below generates the 4M pfn bits and
3762	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3763	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3764	 * are ignored by the hardware.
3765	 *
3766	 * Now, load into TSB/TLB.  At this point:
3767	 * g2 = tagtarget
3768	 * g3 = tte
3769	 * g4 = patte
3770	 * g5 = tt
3771	 * g6 = tsbmiss area
3772	 */
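	/*
	 * What GET_4M_PFN_OFF computes, as a C sketch (bit placement
	 * hand-waved; since the tte PA field lines up with the low va
	 * bits, the offset can simply be OR-ed into the tte):
	 *
	 *	// which 4M chunk of the 32M/256M page holds the va?
	 *	off = va & (large_pgsz - 1) & ~(MMU_PAGESIZE4M - 1);
	 *	synth_tte = tte | off;		// add 4M pfn offset bits
	 *	set_pagesize(&synth_tte, TTE4M);	// for ITLB loads
	 */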
3773tsb_user_pn_synth:
3774	rdpr %tt, %g5
3775	cmp    %g5, FAST_IMMU_MISS_TT
3776	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
3777	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
3778	bz,pn %icc, 4b				/* if not, been here before */
3779	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
3780	brlz,a,pn %g1, 5f			/* no 2nd tsb */
3781	  mov	%g3, %g5
3782
3783	mov	MMU_TAG_ACCESS, %g7
3784	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
3785	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
3786
3787	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3788	mov	%g7, %asi
3789	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 4) /* update TSB */
37905:
3791        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3792        retry
3793
3794tsb_user_itlb_synth:
3795	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 =  2ND TSB */
3796
3797	mov	MMU_TAG_ACCESS, %g7
3798	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
3799	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
3800	brlz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
3801	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
3802
3803	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3804	mov	%g7, %asi
3805	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 6) /* update TSB */
38067:
3807	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
3808        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3809        retry
3810#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
3811
3812tsb_kernel:
3813	rdpr	%tt, %g5
3814#ifdef sun4v
3815	cmp	%g7, TTE4M
3816	bge,pn	%icc, 5f
3817#else
3818	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
3819	be,pn	%icc, 5f
3820#endif /* sun4v */
3821	  nop
3822	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8K TSB ptr
3823	ba,pt	%xcc, 6f
3824	  nop
38255:
3826	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4M TSB ptr
3827	brlz,pn	%g1, 3f		/* skip programming if 4M TSB ptr is -1 */
3828	  nop
38296:
3830#ifndef sun4v
3831tsb_kernel_patch_asi:
3832	or	%g0, RUNTIME_PATCH, %g6
3833	mov	%g6, %asi	! XXX avoid writing to %asi !!
3834#endif
3835	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 7)
38363:
3837#ifdef sun4v
3838	cmp	%g5, T_INSTR_MMU_MISS
3839	be,a,pn	%icc, 1f
3840	  mov	%g3, %g5			! trapstat wants TTE in %g5
3841#endif /* sun4v */
3842	cmp	%g5, FAST_IMMU_MISS_TT
3843	be,pn	%icc, 1f
3844	  mov	%g3, %g5			! trapstat wants TTE in %g5
3845	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3846	! trapstat wants TTE in %g5
3847	retry
38481:
3849	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3850	! trapstat wants TTE in %g5
3851	retry
3852
3853tsb_ism:
3854	/*
3855	 * This is an ISM [i|d]tlb miss.  We optimize for largest
3856	 * page size down to smallest.
3857	 *
3858	 * g2 = vaddr + ctx(or ctxtype (sun4v)) aka (pseudo-)tag access
3859	 *	register
3860	 * g3 = ismmap->ism_seg
3861	 * g4 = physical address of ismmap->ism_sfmmu
3862	 * g6 = tsbmiss area
3863	 */
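	/*
	 * The address rewrite below, as a C sketch (ism_seg packs the
	 * segment base and size; imap_vb_shift recovers the base):
	 *
	 *	vbase  = (ism_seg >> vb_shift) << vb_shift;
	 *	ctx    = tagacc & TAGACC_CTX_MASK;
	 *	va     = tagacc & ~TAGACC_CTX_MASK;
	 *	tagacc = (va - vbase) | ctx;	// offset in ISM segment
	 */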
3864	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
3865	brz,a,pn %g7, ptl1_panic		/* if zero jmp ahead */
3866	  mov	PTL1_BAD_ISM, %g1
3867						/* g5 = pa of imap_vb_shift */
3868	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3869	lduba	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
3870	srlx	%g3, %g4, %g3			/* clr size field */
3871	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
3872	sllx    %g3, %g4, %g3                   /* g3 = ism vbase */
3873	and     %g2, %g1, %g4                   /* g4 = ctx number */
3874	andn    %g2, %g1, %g1                   /* g1 = tlb miss vaddr */
3875	sub     %g1, %g3, %g2                   /* g2 = offset in ISM seg */
3876	or      %g2, %g4, %g2                   /* g2 = (pseudo-)tagacc */
3877	sub     %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5
3878	lduha   [%g5]ASI_MEM, %g4               /* g4 = imap_hatflags */
3879#if defined(sun4v) || defined(UTSB_PHYS)
3880	and     %g4, HAT_CTX1_FLAG, %g5         /* g5 = ctx1 flag */
3881	brz,pt %g5, tsb_chk4M_ism
3882	  nop
3883	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g5
3884	or      %g5, HAT_CHKCTX1_FLAG, %g5
3885	stub    %g5, [%g6 + TSBMISS_URTTEFLAGS]
3886	rdpr    %tt, %g5
3887	SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
3888#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3889
3890	/*
3891	 * ISM pages are always locked down.
3892	 * If we can't find the tte then pagefault
3893	 * and let the spt segment driver resolve it.
3894	 *
3895	 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
3896	 * g4 = imap_hatflags
3897	 * g6 = tsb miss area
3898	 * g7 = ISM hatid
3899	 */
3900
3901tsb_chk4M_ism:
3902	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
3903	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3904	  nop
3905
3906tsb_ism_32M:
3907	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3908	brz,pn	%g5, tsb_ism_256M
3909	  nop
3910
3911	/*
3912	 * 32M hash.
3913	 */
3914
3915	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT32M,
3916	    TTE32M, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3917	    tsb_ism_4M)
3918	/* NOT REACHED */
3919
3920tsb_ism_32M_found:
3921	brlz,a,pt %g3, tsb_validtte
3922	  rdpr	%tt, %g7
3923	ba,pt	%xcc, tsb_ism_4M
3924	  nop
3925
3926tsb_ism_256M:
3927	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3928	brz,a,pn %g5, ptl1_panic
3929	  mov	PTL1_BAD_ISM, %g1
3930
3931	/*
3932	 * 256M hash.
3933	 */
3934	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT256M,
3935	    TTE256M, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3936	    tsb_ism_4M)
3937
3938tsb_ism_256M_found:
3939	brlz,a,pt %g3, tsb_validtte
3940	  rdpr	%tt, %g7
3941
3942tsb_ism_4M:
3943	/*
3944	 * 4M hash.
3945	 */
3946	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT4M,
3947	    TTE4M, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3948	    tsb_ism_8K)
3949	/* NOT REACHED */
3950
3951tsb_ism_4M_found:
3952	brlz,a,pt %g3, tsb_validtte
3953	  rdpr	%tt, %g7
3954
3955tsb_ism_8K:
3956	/*
3957	 * 8K and 64K hash.
3958	 */
3959
3960	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT64K,
3961	    TTE64K, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3962	    tsb_pagefault)
3963	/* NOT REACHED */
3964
3965tsb_ism_8K_found:
3966	brlz,a,pt %g3, tsb_validtte
3967	  rdpr	%tt, %g7
3968
3969tsb_pagefault:
3970	rdpr	%tt, %g7
3971	cmp	%g7, FAST_PROT_TT
3972	be,a,pn	%icc, tsb_protfault
3973	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3974
3975tsb_protfault:
3976	/*
3977	 * We get here if we couldn't find a valid tte in the hash.
3978	 *
3979	 * If user and we are at tl>1 we go to window handling code.
3980	 *
3981	 * If kernel and the fault is on the same page as our stack
3982	 * pointer, then we know the stack is bad and the trap handler
3983	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3984	 *
3985	 * If this is a kernel trap and tl>1, panic.
3986	 *
3987	 * Otherwise we call pagefault.
3988	 */
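	/*
	 * The same decision tree as C pseudo-code (a sketch; the DTrace
	 * NOFAULT handling at 2: and 4: below is folded into the two
	 * fault calls, and none of these calls returns here):
	 *
	 *	if (ctx != 0) {				// user address
	 *		if (tl > 1)
	 *			sfmmu_window_trap();
	 *		else
	 *			sfmmu_mmu_trap();
	 *	} else {				// kernel address
	 *		if (btop(va) == btop(sp))
	 *			ptl1_panic(PTL1_BAD_STACK);
	 *		else if (tl > 1)
	 *			ptl1_panic(tt == FAST_PROT_TT ?
	 *			    PTL1_BAD_KPROT_FAULT : PTL1_BAD_KMISS);
	 *		else
	 *			sfmmu_pagefault();
	 *	}
	 */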
3989	cmp	%g7, FAST_IMMU_MISS_TT
3990#ifdef sun4v
3991	MMU_FAULT_STATUS_AREA(%g4)
3992	ldx	[%g4 + MMFSA_I_CTX], %g5
3993	ldx	[%g4 + MMFSA_D_CTX], %g4
3994	move	%icc, %g5, %g4
3995	cmp	%g7, T_INSTR_MMU_MISS
3996	move	%icc, %g5, %g4
3997#else
3998	mov	MMU_TAG_ACCESS, %g4
3999	ldxa	[%g4]ASI_DMMU, %g2
4000	ldxa	[%g4]ASI_IMMU, %g5
4001	move	%icc, %g5, %g2
4002	cmp	%g7, T_INSTR_MMU_MISS
4003	move	%icc, %g5, %g2
4004	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
4005#endif /* sun4v */
4006	brnz,pn	%g4, 3f				/* skip if not kernel */
4007	  rdpr	%tl, %g5
4008
4009	add	%sp, STACK_BIAS, %g3
4010	srlx	%g3, MMU_PAGESHIFT, %g3
4011	srlx	%g2, MMU_PAGESHIFT, %g4
4012	cmp	%g3, %g4
4013	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
4014	  mov	PTL1_BAD_STACK, %g1
4015
4016	cmp	%g5, 1
4017	ble,pt	%icc, 2f
4018	  nop
4019	TSTAT_CHECK_TL1(2f, %g1, %g2)
4020	rdpr	%tt, %g2
4021	cmp	%g2, FAST_PROT_TT
4022	mov	PTL1_BAD_KPROT_FAULT, %g1
4023	movne	%icc, PTL1_BAD_KMISS, %g1
4024	ba,pt	%icc, ptl1_panic
4025	  nop
4026
40272:
4028	/*
4029	 * We are taking a pagefault in the kernel on a kernel address.  If
4030	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
4031	 * want to call sfmmu_pagefault -- we will instead note that a fault
4032	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
4033	 * (instead of a "retry").  This will step over the faulting
4034	 * instruction.
4035	 */
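	/*
	 * Roughly, in C (a sketch; cpu_core and the CPU_DTRACE_* flags
	 * are the real structures, the control flow is abbreviated):
	 *
	 *	cpu_core_t *cp = &cpu_core[CPU->cpu_id];
	 *	if (!(cp->cpuc_dtrace_flags & CPU_DTRACE_NOFAULT))
	 *		sfmmu_pagefault();		// normal path
	 *	cp->cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *	cp->cpuc_dtrace_illval = fault_addr;
	 *	done();		// step over the faulting instruction
	 */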
4036	CPU_INDEX(%g1, %g2)
4037	set	cpu_core, %g2
4038	sllx	%g1, CPU_CORE_SHIFT, %g1
4039	add	%g1, %g2, %g1
4040	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
4041	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
4042	bz	sfmmu_pagefault
4043	or	%g2, CPU_DTRACE_BADADDR, %g2
4044	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
4045	GET_MMU_D_ADDR(%g3, %g4)
4046	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
4047	done
4048
40493:
4050	cmp	%g5, 1
4051	ble,pt	%icc, 4f
4052	  nop
4053	TSTAT_CHECK_TL1(4f, %g1, %g2)
4054	ba,pt	%icc, sfmmu_window_trap
4055	  nop
4056
40574:
4058	/*
4059	 * We are taking a pagefault on a non-kernel address.  If we are in
4060	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
4061	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
4062	 */
4063	CPU_INDEX(%g1, %g2)
4064	set	cpu_core, %g2
4065	sllx	%g1, CPU_CORE_SHIFT, %g1
4066	add	%g1, %g2, %g1
4067	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
4068	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
4069	bz	sfmmu_mmu_trap
4070	or	%g2, CPU_DTRACE_BADADDR, %g2
4071	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
4072	GET_MMU_D_ADDR(%g3, %g4)
4073	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
4074
4075	/*
4076	 * Be sure that we're actually taking this miss from the kernel --
4077	 * otherwise we have managed to return to user-level with
4078	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
4079	 */
4080	rdpr	%tstate, %g2
4081	btst	TSTATE_PRIV, %g2
4082	bz,a	ptl1_panic
4083	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
4084	done
4085
4086	ALTENTRY(tsb_tl0_noctxt)
4087	/*
4088	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
4089	 * if it is, indicate that we have faulted and issue a done.
4090	 */
4091	CPU_INDEX(%g5, %g6)
4092	set	cpu_core, %g6
4093	sllx	%g5, CPU_CORE_SHIFT, %g5
4094	add	%g5, %g6, %g5
4095	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
4096	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
4097	bz	1f
4098	or	%g6, CPU_DTRACE_BADADDR, %g6
4099	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
4100	GET_MMU_D_ADDR(%g3, %g4)
4101	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
4102
4103	/*
4104	 * Be sure that we're actually taking this miss from the kernel --
4105	 * otherwise we have managed to return to user-level with
4106	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
4107	 */
4108	rdpr	%tstate, %g5
4109	btst	TSTATE_PRIV, %g5
4110	bz,a	ptl1_panic
4111	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
4112	TSTAT_CHECK_TL1(2f, %g1, %g2);
41132:
4114	done
4115
41161:
4117	rdpr	%tt, %g5
4118	cmp	%g5, FAST_IMMU_MISS_TT
4119#ifdef sun4v
4120	MMU_FAULT_STATUS_AREA(%g2)
4121	be,a,pt	%icc, 2f
4122	  ldx	[%g2 + MMFSA_I_CTX], %g3
4123	cmp	%g5, T_INSTR_MMU_MISS
4124	be,a,pt	%icc, 2f
4125	  ldx	[%g2 + MMFSA_I_CTX], %g3
4126	ldx	[%g2 + MMFSA_D_CTX], %g3
41272:
4128#else
4129	mov	MMU_TAG_ACCESS, %g2
4130	be,a,pt	%icc, 2f
4131	  ldxa	[%g2]ASI_IMMU, %g3
4132	ldxa	[%g2]ASI_DMMU, %g3
41332:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
4134#endif /* sun4v */
4135	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
4136	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
4137	rdpr	%tl, %g5
4138	cmp	%g5, 1
4139	ble,pt	%icc, sfmmu_mmu_trap
4140	  nop
4141	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
4142	ba,pt	%icc, sfmmu_window_trap
4143	  nop
4144	SET_SIZE(sfmmu_tsb_miss)
4145#endif  /* lint */
4146
4147#if defined (lint)
4148/*
4149 * This routine will look for a user or kernel vaddr in the hash
4150 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
4151 * grab any locks.  It should only be used by other sfmmu routines.
4152 */
4153/* ARGSUSED */
4154pfn_t
4155sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
4156{
4157	return(0);
4158}
4159
4160/* ARGSUSED */
4161pfn_t
4162sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
4163{
4164	return(0);
4165}
4166
4167#else /* lint */
4168
4169	ENTRY_NP(sfmmu_vatopfn)
4170 	/*
4171 	 * disable interrupts
4172 	 */
4173 	rdpr	%pstate, %o3
4174#ifdef DEBUG
4175	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
4176#endif
4177	/*
4178	 * disable interrupts to protect the TSBMISS area
4179	 */
4180	andn    %o3, PSTATE_IE, %o5
4181	wrpr    %o5, 0, %pstate
4182
4183	/*
4184	 * o0 = vaddr
4185	 * o1 = sfmmup
4186	 * o2 = ttep
4187	 */
4188	CPU_TSBMISS_AREA(%g1, %o5)
4189	ldn	[%g1 + TSBMISS_KHATID], %o4
4190	cmp	%o4, %o1
4191	bne,pn	%ncc, vatopfn_nokernel
4192	  mov	TTE64K, %g5			/* g5 = rehash # */
4193	mov	%g1, %o5			/* o5 = tsbmiss_area */
4194	/*
4195	 * o0 = vaddr
4196	 * o1 & o4 = hatid
4197	 * o2 = ttep
4198	 * o5 = tsbmiss area
4199	 */
4200	mov	HBLK_RANGE_SHIFT, %g6
42011:
4202
4203	/*
4204	 * o0 = vaddr
4205	 * o1 = sfmmup
4206	 * o2 = ttep
4207	 * o3 = old %pstate
4208	 * o4 = hatid
4209	 * o5 = tsbmiss
4210	 * g5 = rehash #
4211	 * g6 = hmeshift
4212	 *
4213	 * The first arg to GET_TTE is actually tagaccess register
4214	 * not just vaddr. Since this call is for kernel we need to clear
4215	 * any lower vaddr bits that would be interpreted as ctx bits.
4216	 */
4217	set     TAGACC_CTX_MASK, %g1
4218	andn    %o0, %g1, %o0
4219	GET_TTE(%o0, %o4, %g1, %g2, %g3, %o5, %g4, %g6, %g5,
4220		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
4221
4222kvtop_hblk_found:
4223	/*
4224	 * o0 = vaddr
4225	 * o1 = sfmmup
4226	 * o2 = ttep
4227	 * g1 = tte
4228	 * g2 = tte pa
4229	 * g3 = tte va
4230	 * o5 = tsbmiss area
4231	 * o1 = hat id
4232	 */
4233	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
4234	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4235	stx	%g1, [%o2]			/* put tte into *ttep */
4236	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
4237	/*
4238	 * o0 = vaddr
4239	 * o1 = sfmmup
4240	 * o2 = ttep
4241	 * g1 = pfn
4242	 */
4243	ba,pt	%xcc, 6f
4244	  mov	%g1, %o0
4245
4246kvtop_nohblk:
4247	/*
4248	 * We get here if we couldn't find a valid hblk in the hash.
4249	 * We rehash if necessary.
4250	 */
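	/*
	 * The rehash progression, as a C sketch (hash_probe() again
	 * stands in for GET_TTE):
	 *
	 *	// sun4u: 1 (64K) -> 2 (512K) -> 3 (4M), then give up
	 *	// sun4v: 1 (64K) -> 3 (4M) -> 5 (256M), then give up
	 *	while (hashcnt < max_hashcnt) {
	 *		hashcnt = next_hashcnt(hashcnt);
	 *		hmeshift = TTE_PAGE_SHIFT(hashcnt);
	 *		if (hash_probe(khatid, va, hmeshift, &tte))
	 *			return (TTE_TO_PFN(va, &tte));
	 *	}
	 *	return (PFN_INVALID);
	 */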
4251	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
4252#ifdef sun4v
4253	cmp	%g5, MAX_HASHCNT
4254#else
4255	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
4256#endif /* sun4v */
4257	be,a,pn	%icc, 6f
4258	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4259	mov	%o1, %o4			/* restore hatid */
4260#ifdef sun4v
4261        add	%g5, 2, %g5
4262	cmp	%g5, 3
4263	move	%icc, MMU_PAGESHIFT4M, %g6
4264	ba,pt	%icc, 1b
4265	movne	%icc, MMU_PAGESHIFT256M, %g6
4266#else
4267        inc	%g5
4268	cmp	%g5, 2
4269	move	%icc, MMU_PAGESHIFT512K, %g6
4270	ba,pt	%icc, 1b
4271	movne	%icc, MMU_PAGESHIFT4M, %g6
4272#endif /* sun4v */
42736:
4274	retl
4275 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4276
4277tsb_suspend:
4278	/*
4279	 * o0 = vaddr
4280	 * o1 = sfmmup
4281	 * o2 = ttep
4282	 * g1 = tte
4283	 * g2 = tte pa
4284	 * g3 = tte va
4285	 * o5 = tsbmiss area (o2 still holds ttep)
4286	 */
4287	stx	%g1, [%o2]			/* put tte into *ttep */
4288	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
4289	  sub	%g0, 1, %o0			/* output = PFN_INVALID */
4290	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
42918:
4292	retl
4293	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
4294
4295vatopfn_nokernel:
4296	/*
4297	 * This routine does NOT support user addresses.
4298	 * There is a routine in C that supports this.
4299	 * The only reason the C routine doesn't also handle
4300	 * kernel addresses is that it does va_to_pa while
4301	 * holding the hashlock.
4302	 */
4303 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4304	save	%sp, -SA(MINFRAME), %sp
4305	sethi	%hi(sfmmu_panic3), %o0
4306	call	panic
4307	 or	%o0, %lo(sfmmu_panic3), %o0
4308
4309	SET_SIZE(sfmmu_vatopfn)
4310
4311	/*
4312	 * %o0 = vaddr
4313	 * %o1 = hashno (aka szc)
4314	 *
4316	 * This routine is similar to sfmmu_vatopfn() but will only look for
4317	 * a kernel vaddr in the hash structure for the specified rehash value.
4318	 * It's just an optimization for the case when pagesize for a given
4319	 * va range is already known (e.g. large page heap) and we don't want
4320	 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
4321	 *
4322	 * Returns valid pfn or PFN_INVALID if
4323	 * tte for specified rehash # is not found, invalid or suspended.
4324	 */
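	/*
	 * Typical use, as a sketch: when the caller already knows the
	 * mapping size (e.g. a 4M kernel heap page), probe just that
	 * hash and fall back only if needed:
	 *
	 *	pfn = sfmmu_kvaszc2pfn(vaddr, TTE4M);
	 *	if (pfn == PFN_INVALID)
	 *		pfn = sfmmu_vatopfn(vaddr, ksfmmup, &tte);
	 */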
4325	ENTRY_NP(sfmmu_kvaszc2pfn)
4326 	/*
4327 	 * disable interrupts
4328 	 */
4329 	rdpr	%pstate, %o3
4330#ifdef DEBUG
4331	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
4332#endif
4333	/*
4334	 * disable interrupts to protect the TSBMISS area
4335	 */
4336	andn    %o3, PSTATE_IE, %o5
4337	wrpr    %o5, 0, %pstate
4338
4339	CPU_TSBMISS_AREA(%g1, %o5)
4340	ldn	[%g1 + TSBMISS_KHATID], %o4
4341	sll	%o1, 1, %g6			/* %g6 = 2 * hashno */
4342	add	%g6, %o1, %g6			/* %g6 = 3 * hashno */
4343	add	%g6, MMU_PAGESHIFT, %g6	/* hmeshift = 3 * hashno + 13 */
4344	/*
4345	 * %o0 = vaddr
4346	 * %o1 = hashno
4347	 * %o3 = old %pstate
4348	 * %o4 = ksfmmup
4349	 * %g1 = tsbmiss area
4350	 * %g6 = hmeshift
4351	 */
4352
4353	/*
4354	 * The first arg to GET_TTE is actually tagaccess register
4355	 * not just vaddr. Since this call is for kernel we need to clear
4356	 * any lower vaddr bits that would be interpreted as ctx bits.
4357	 */
4358	srlx	%o0, MMU_PAGESHIFT, %o0
4359	sllx	%o0, MMU_PAGESHIFT, %o0
4360	GET_TTE(%o0, %o4, %g3, %g4, %g5, %g1, %o5, %g6, %o1,
4361		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
4362		kvaszc2pfn_nohblk)
4363
4364kvaszc2pfn_hblk_found:
4365	/*
4366	 * %g3 = tte
4367	 * %o0 = vaddr
4368	 */
4369	brgez,a,pn %g3, 1f			/* check if tte is invalid */
4370	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4371	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
4372	/*
4373	 * g3 = pfn
4374	 */
4375	ba,pt	%xcc, 1f
4376	  mov	%g3, %o0
4377
4378kvaszc2pfn_nohblk:
4379	mov	-1, %o0
4380
43811:
4382	retl
4383 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4384
4385	SET_SIZE(sfmmu_kvaszc2pfn)
4386
4387#endif /* lint */
4388
4389
4390
4391#if !defined(lint)
4392
4393/*
4394 * kpm lock used between trap level tsbmiss handler and kpm C level.
4395 */
4396#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
4397	mov     0xff, tmp1						;\
4398label1:									;\
4399	casa    [kpmlckp]asi, %g0, tmp1					;\
4400	brnz,pn tmp1, label1						;\
4401	mov     0xff, tmp1						;\
4402	membar  #LoadLoad
4403
4404#define KPMLOCK_EXIT(kpmlckp, asi)					\
4405	membar  #LoadStore|#StoreStore					;\
4406	sta     %g0, [kpmlckp]asi
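/*
 * In C terms this pair is a minimal spin lock, roughly (a sketch;
 * atomic_cas_32() stands in for the casa):
 *
 *	void kpmlock_enter(volatile uint32_t *lp) {
 *		while (atomic_cas_32(lp, 0, 0xff) != 0)
 *			continue;		// spin until we own it
 *		membar_consumer();		// #LoadLoad
 *	}
 *	void kpmlock_exit(volatile uint32_t *lp) {
 *		membar_exit();		// #LoadStore|#StoreStore
 *		*lp = 0;
 *	}
 */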
4407
4408/*
4409 * Lookup a memseg for a given pfn and if found, return the physical
4410 * address of the corresponding struct memseg in mseg, otherwise
4411 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
4412 * tsbmp, %asi is assumed to be ASI_MEM.
4413 * This lookup is done by strictly traversing only the physical memseg
4414 * linkage. The more generic approach, to check the virtual linkage
4415 * before using the physical (used e.g. with hmehash buckets), cannot
4416 * be used here. Memory DR operations can run in parallel to this
4417 * lookup w/o any locks and updates of the physical and virtual linkage
4418	 * cannot be done atomically with respect to each other. Because
4419	 * physical address zero can be a valid physical address,
4420	 * MSEG_NULLPTR_PA acts as the "physical NULL" pointer.
4421 */
4422#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
4423	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
4424	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
4425	udivx	pfn, mseg, mseg						;\
4426	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
4427	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
4428	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
4429	add	tmp1, mseg, tmp1					;\
4430	ldxa	[tmp1]%asi, mseg					;\
4431	cmp	mseg, MSEG_NULLPTR_PA					;\
4432	be,pn	%xcc, label/**/1		/* if not found */	;\
4433	  nop								;\
4434	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4435	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4436	blu,pn	%xcc, label/**/1					;\
4437	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4438	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4439	bgeu,pn	%xcc, label/**/1					;\
4440	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
4441	mulx	tmp1, PAGE_SIZE, tmp1					;\
4442	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
4443	add	tmp2, tmp1, tmp1			/* pp */	;\
4444	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
4445	cmp	tmp2, pfn						;\
4446	be,pt	%xcc, label/**/_ok			/* found */	;\
4447label/**/1:								;\
4448	/* brute force lookup */					;\
4449	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
4450	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
4451label/**/2:								;\
4452	cmp	mseg, MSEG_NULLPTR_PA					;\
4453	be,pn	%xcc, label/**/_ok	/* end of list: not found */	;\
4454	  nop								;\
4455	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4456	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4457	blu,a,pt %xcc, label/**/2					;\
4458	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4459	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4460	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4461	bgeu,a,pt %xcc, label/**/2					;\
4462	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4463label/**/_ok:
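/*
 * C-level sketch of the lookup above (abbreviated; memseg_hash_slot()
 * is illustrative and the physical-address indirections via %asi are
 * dropped):
 *
 *	struct memseg *mseg = memseg_hash_slot(pfn / mhash_per_slot);
 *	if (mseg == NULL || pfn < mseg->pages_base ||
 *	    pfn >= mseg->pages_end ||
 *	    mseg->pages[pfn - mseg->pages_base].p_pagenum != pfn) {
 *		// brute force: walk the physical memseg list
 *		for (mseg = memsegs; mseg != NULL; mseg = mseg->next)
 *			if (pfn >= mseg->pages_base &&
 *			    pfn < mseg->pages_end)
 *				break;
 *	}
 *	// result may still be "physical NULL"; callers test for it
 */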
4464
4465	/*
4466	 * kpm tsb miss handler large pages
4467	 * g1 = 8K kpm TSB entry pointer
4468	 * g2 = tag access register
4469	 * g3 = 4M kpm TSB entry pointer
4470	 */
4471	ALTENTRY(sfmmu_kpm_dtsb_miss)
4472	TT_TRACE(trace_tsbmiss)
4473
4474	CPU_INDEX(%g7, %g6)
4475	sethi	%hi(kpmtsbm_area), %g6
4476	sllx	%g7, KPMTSBM_SHIFT, %g7
4477	or	%g6, %lo(kpmtsbm_area), %g6
4478	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4479
4480	/* check enable flag */
4481	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4482	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4483	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4484	  nop
4485
4486	/* VA range check */
4487	ldx	[%g6 + KPMTSBM_VBASE], %g7
4488	cmp	%g2, %g7
4489	blu,pn	%xcc, sfmmu_tsb_miss
4490	  ldx	[%g6 + KPMTSBM_VEND], %g5
4491	cmp	%g2, %g5
4492	bgeu,pn	%xcc, sfmmu_tsb_miss
4493	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
4494
4495	/*
4496	 * check TL tsbmiss handling flag
4497	 * bump tsbmiss counter
4498	 */
4499	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4500#ifdef	DEBUG
4501	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
4502	inc	%g5
4503	brz,pn	%g3, sfmmu_kpm_exception
4504	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4505#else
4506	inc	%g5
4507	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4508#endif
4509	/*
4510	 * At this point:
4511	 *  g1 = 8K kpm TSB pointer (not used)
4512	 *  g2 = tag access register
4513	 *  g3 = clobbered
4514	 *  g6 = per-CPU kpm tsbmiss area
4515	 *  g7 = kpm_vbase
4516	 */
4517
4518	/* vaddr2pfn */
4519	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4520	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4521	srax    %g4, %g3, %g2			/* which alias range (r) */
4522	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4523	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4524
4525	/*
4526	 * Setup %asi
4527	 * mseg_pa = page_numtomemseg_nolock(pfn)
4528	 * if (mseg_pa == NULL) sfmmu_kpm_exception
4529	 * g2=pfn
4530	 */
4531	mov	ASI_MEM, %asi
4532	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
4533	cmp	%g3, MSEG_NULLPTR_PA
4534	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4535	  nop
4536
4537	/*
4538	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
4539	 * g2=pfn g3=mseg_pa
4540	 */
4541	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
4542	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4543	srlx	%g2, %g5, %g4
4544	sllx	%g4, %g5, %g4
4545	sub	%g4, %g7, %g4
4546	srlx	%g4, %g5, %g4
4547
4548	/*
4549	 * Validate inx value
4550	 * g2=pfn g3=mseg_pa g4=inx
4551	 */
4552#ifdef	DEBUG
4553	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4554	cmp	%g4, %g5			/* inx - nkpmpgs */
4555	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4556	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4557#else
4558	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4559#endif
4560	/*
4561	 * kp = &mseg_pa->kpm_pages[inx]
4562	 */
4563	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
4564	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
4565	add	%g5, %g4, %g5			/* kp */
4566
4567	/*
4568	 * KPMP_HASH(kp)
4569	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
4570	 */
4571	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4572	sub	%g7, 1, %g7			/* mask */
4573	srlx	%g5, %g1, %g1			/* x = kp >> kpmp_shift */
4574	add	%g5, %g1, %g5			/* y = kp + x */
4575	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4576
4577	/*
4578	 * Calculate physical kpm_page pointer
4579	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4580	 */
4581	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
4582	add	%g1, %g4, %g1			/* kp_pa */
4583
4584	/*
4585	 * Calculate physical hash lock address
4586	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
4587	 */
4588	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
4589	sllx	%g5, KPMHLK_SHIFT, %g5
4590	add	%g4, %g5, %g3
4591	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
4592
4593	/*
4594	 * Assemble tte
4595	 * g1=kp_pa g2=pfn g3=hlck_pa
4596	 */
4597#ifdef sun4v
4598	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4599	sllx	%g5, 32, %g5
4600	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4601	or	%g4, TTE4M, %g4
4602	or	%g5, %g4, %g5
4603#else
4604	sethi	%hi(TTE_VALID_INT), %g4
4605	mov	TTE4M, %g5
4606	sllx	%g5, TTE_SZ_SHFT_INT, %g5
4607	or	%g5, %g4, %g5			/* upper part */
4608	sllx	%g5, 32, %g5
4609	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4610	or	%g5, %g4, %g5
4611#endif
4612	sllx	%g2, MMU_PAGESHIFT, %g4
4613	or	%g5, %g4, %g5			/* tte */
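	/*
	 * I.e., in C terms (a sketch; the size-field placement differs
	 * between sun4u and sun4v, see the two variants above):
	 *
	 *	tte = ((uint64_t)TTE_VALID_INT << 32) |
	 *	    TTE_CP_INT | TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
	 *	    ((uint64_t)pfn << MMU_PAGESHIFT);	// 4M kpm tte
	 */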
4614	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4615	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4616
4617	/*
4618	 * tsb dropin
4619	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
4620	 */
4621
4622	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4623	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
4624
4625	/* use C-handler if there's no go for dropin */
4626	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
4627	cmp	%g7, -1
4628	bne,pn	%xcc, 5f	/* use C-handler if there's no go for dropin */
4629	  nop
4630
4631#ifdef	DEBUG
4632	/* double check refcnt */
4633	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
4634	brz,pn	%g7, 5f			/* let C-handler deal with this */
4635	  nop
4636#endif
4637
4638#ifndef sun4v
4639	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4640	mov	ASI_N, %g1
4641	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4642	movnz	%icc, ASI_MEM, %g1
4643	mov	%g1, %asi
4644#endif
4645
4646	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
4647	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
4648
4649	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4650	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4651
4652	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
4653
4654	/* KPMLOCK_EXIT(kpmlckp, asi) */
4655	KPMLOCK_EXIT(%g3, ASI_MEM)
4656
4657	/*
4658	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4659	 * point to trapstat's TSB miss return code (note that trapstat
4660	 * itself will patch the correct offset to add).
4661	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4662	 */
4663	rdpr	%tl, %g7
4664	cmp	%g7, 1
4665	ble	%icc, 0f
4666	sethi	%hi(KERNELBASE), %g6
4667	rdpr	%tpc, %g7
4668	or	%g6, %lo(KERNELBASE), %g6
4669	cmp	%g7, %g6
4670	bgeu	%xcc, 0f
4671	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
4672	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4673	wrpr	%g7, %tpc
4674	add	%g7, 4, %g7
4675	wrpr	%g7, %tnpc
46760:
4677	retry
46785:
4679	/* g3=hlck_pa */
4680	KPMLOCK_EXIT(%g3, ASI_MEM)
4681	ba,pt	%icc, sfmmu_kpm_exception
4682	  nop
4683	SET_SIZE(sfmmu_kpm_dtsb_miss)
4684
4685	/*
4686	 * kpm tsbmiss handler for smallpages
4687	 * g1 = 8K kpm TSB pointer
4688	 * g2 = tag access register
4689	 * g3 = 4M kpm TSB pointer
4690	 */
4691	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
4692	TT_TRACE(trace_tsbmiss)
4693	CPU_INDEX(%g7, %g6)
4694	sethi	%hi(kpmtsbm_area), %g6
4695	sllx	%g7, KPMTSBM_SHIFT, %g7
4696	or	%g6, %lo(kpmtsbm_area), %g6
4697	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4698
4699	/* check enable flag */
4700	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4701	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4702	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4703	  nop
4704
4705	/*
4706	 * VA range check
4707	 * On fail: goto sfmmu_tsb_miss
4708	 */
4709	ldx	[%g6 + KPMTSBM_VBASE], %g7
4710	cmp	%g2, %g7
4711	blu,pn	%xcc, sfmmu_tsb_miss
4712	  ldx	[%g6 + KPMTSBM_VEND], %g5
4713	cmp	%g2, %g5
4714	bgeu,pn	%xcc, sfmmu_tsb_miss
4715	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
4716
4717	/*
4718	 * check TL tsbmiss handling flag
4719	 * bump tsbmiss counter
4720	 */
4721	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4722#ifdef	DEBUG
4723	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
4724	inc	%g5
4725	brz,pn	%g1, sfmmu_kpm_exception
4726	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4727#else
4728	inc	%g5
4729	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4730#endif
4731	/*
4732	 * At this point:
4733	 *  g1 = clobbered
4734	 *  g2 = tag access register
4735	 *  g3 = 4M kpm TSB pointer (not used)
4736	 *  g6 = per-CPU kpm tsbmiss area
4737	 *  g7 = kpm_vbase
4738	 */
4739
4740	/*
4741	 * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
4742	 * which is defined in mach_kpm.h. Any changes in that macro
4743	 * should also be ported back to this assembly code.
4744	 */
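	/*
	 * For reference, the logic is roughly (see SFMMU_KPM_VTOP in
	 * mach_kpm.h for the authoritative version):
	 *
	 *	paddr = vaddr - kpm_vbase;
	 *	r = paddr >> kpm_size_shift;		// alias range
	 *	if (r != 0) {
	 *		v = (vaddr >> MMU_PAGESHIFT) & vac_colors_mask;
	 *		paddr -= r << kpm_size_shift;
	 *		if (r > v)
	 *			paddr += (r - v) << MMU_PAGESHIFT;
	 *		else
	 *			paddr -= r << MMU_PAGESHIFT;
	 *	}
	 */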
4745	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3	/* g3 = kpm_size_shift */
4746	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4747	srax    %g4, %g3, %g7			/* which alias range (r) */
4748	brz,pt	%g7, 2f
4749	  sethi   %hi(vac_colors_mask), %g5
4750	ld	[%g5 + %lo(vac_colors_mask)], %g5
4751
4752	srlx	%g2, MMU_PAGESHIFT, %g1		/* vaddr >> MMU_PAGESHIFT */
4753	and	%g1, %g5, %g1			/* g1 = v */
4754	sllx	%g7, %g3, %g5			/* g5 = r << kpm_size_shift */
4755	cmp	%g7, %g1			/* if (r > v) */
4756	bleu,pn %xcc, 1f
4757	  sub   %g4, %g5, %g4			/* paddr -= r << kpm_size_shift */
4758	sub	%g7, %g1, %g5			/* g5 = r - v */
4759	sllx	%g5, MMU_PAGESHIFT, %g7		/* (r-v) << MMU_PAGESHIFT */
4760	add	%g4, %g7, %g4			/* paddr += (r-v)<<MMU_PAGESHIFT */
4761	ba	2f
4762	  nop
47631:
4764	sllx	%g7, MMU_PAGESHIFT, %g5		/* else */
4765	sub	%g4, %g5, %g4			/* paddr -= r << MMU_PAGESHIFT */
4766
4767	/*
4768	 * paddr2pfn
4769	 *  g1 = vcolor (not used)
4770	 *  g2 = tag access register
4771	 *  g3 = clobbered
4772	 *  g4 = paddr
4773	 *  g5 = clobbered
4774	 *  g6 = per-CPU kpm tsbmiss area
4775	 *  g7 = clobbered
4776	 */
47772:
4778	srlx	%g4, MMU_PAGESHIFT, %g2		/* g2 = pfn */
4779
4780	/*
4781	 * Setup %asi
4782	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
4783	 * if (mseg not found) sfmmu_kpm_exception
4784	 * g2=pfn g6=per-CPU kpm tsbmiss area
4785	 * g4 g5 g7 for scratch use.
4786	 */
4787	mov	ASI_MEM, %asi
4788	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
4789	cmp	%g3, MSEG_NULLPTR_PA
4790	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4791	  nop
4792
4793	/*
4794	 * inx = pfn - mseg_pa->kpm_pbase
4795	 * g2=pfn  g3=mseg_pa  g6=per-CPU kpm tsbmiss area
4796	 */
4797	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4798	sub	%g2, %g7, %g4
4799
4800#ifdef	DEBUG
4801	/*
4802	 * Validate inx value
4803	 * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
4804	 */
4805	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4806	cmp	%g4, %g5			/* inx - nkpmpgs */
4807	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4808	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4809#else
4810	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4811#endif
4812	/* ksp = &mseg_pa->kpm_spages[inx] */
4813	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
4814	add	%g5, %g4, %g5			/* ksp */
4815
4816	/*
4817	 * KPMP_SHASH(kp)
4818	 * g2=pfn g3=mseg_pa g4=inx g5=ksp
4819	 * g6=per-CPU kpm tsbmiss area  g7=kpmp_stable_sz
4820	 */
4821	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4822	sub	%g7, 1, %g7			/* mask */
4823	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
4824	add	%g5, %g1, %g5			/* y = ksp + x */
4825	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4826
4827	/*
4828	 * Calculate physical kpm_spage pointer
4829	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4830	 * g6=per-CPU kpm tsbmiss area
4831	 */
4832	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
4833	add	%g1, %g4, %g1			/* ksp_pa */
4834
4835	/*
4836	 * Calculate physical hash lock address.
4837	 * Note: Changes in kpm_shlk_t must be reflected here.
4838	 * g1=ksp_pa g2=pfn g5=hashinx
4839	 * g6=per-CPU kpm tsbmiss area
4840	 */
4841	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
4842	sllx	%g5, KPMSHLK_SHIFT, %g5
4843	add	%g4, %g5, %g3			/* hlck_pa */
4844
4845	/*
4846	 * Assemble non-cacheable tte initially
4847	 * g1=ksp_pa g2=pfn g3=hlck_pa
4848	 * g6=per-CPU kpm tsbmiss area
4849	 */
4850	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4851	sllx	%g5, 32, %g5
4852	mov	(TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4853	or	%g5, %g4, %g5
4854	sllx	%g2, MMU_PAGESHIFT, %g4
4855	or	%g5, %g4, %g5			/* tte */
4856	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4857	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4858
4859	/*
4860	 * tsb dropin
4861	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
4862	 * g6=per-CPU kpm tsbmiss area  g7=scratch register
4863	 */
4864
4865	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4866	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
4867
4868	/* use C-handler if there's no go for dropin */
4869	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7	/* kp_mapped */
4870	andcc	%g7, KPM_MAPPED_GO, %g0			/* go or no go ? */
4871	bz,pt	%icc, 5f				/* no go */
4872	  nop
4873	and	%g7, KPM_MAPPED_MASK, %g7		/* go */
4874	cmp	%g7, KPM_MAPPEDS			/* cacheable ? */
4875	be,a,pn	%xcc, 3f
4876	  or	%g5, TTE_CV_INT, %g5			/* cacheable */
48773:
4878#ifndef sun4v
4879	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4880	mov	ASI_N, %g1
4881	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4882	movnz	%icc, ASI_MEM, %g1
4883	mov	%g1, %asi
4884#endif
4885
4886	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
4887	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
4888
4889	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4890	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4891
4892	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
4893
4894	/* KPMLOCK_EXIT(kpmlckp, asi) */
4895	KPMLOCK_EXIT(%g3, ASI_MEM)
4896
4897	/*
4898	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4899	 * point to trapstat's TSB miss return code (note that trapstat
4900	 * itself will patch the correct offset to add).
4901	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4902	 */
4903	rdpr	%tl, %g7
4904	cmp	%g7, 1
4905	ble	%icc, 0f
4906	sethi	%hi(KERNELBASE), %g6
4907	rdpr	%tpc, %g7
4908	or	%g6, %lo(KERNELBASE), %g6
4909	cmp	%g7, %g6
4910	bgeu	%xcc, 0f
4911	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4912	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4913	wrpr	%g7, %tpc
4914	add	%g7, 4, %g7
4915	wrpr	%g7, %tnpc
49160:
4917	retry
49185:
4919	/* g3=hlck_pa */
4920	KPMLOCK_EXIT(%g3, ASI_MEM)
4921	ba,pt	%icc, sfmmu_kpm_exception
4922	  nop
4923	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4924
4925#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4926#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4927#endif
4928
4929#endif /* lint */
4930
4931#ifdef	lint
4932/*
4933 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4934 * Called from C-level, sets/clears "go" indication for trap level handler.
4935 * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
4936 * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
4937 * Assumes khl_mutex is held when called from C-level.
4938 */
4939/* ARGSUSED */
4940void
4941sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4942{
4943}
4944
4945/*
4946 * kpm_smallpages: stores val to the byte at address "mapped",
4947 * within low-level lock brackets; the old value is returned.
4948 * Called from C-level.
4949 */
4950/* ARGSUSED */
4951int
4952sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
4953{
4954	return (0);
4955}
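
/*
 * Typical C-level usage, as a sketch (the kpm field names here are
 * illustrative):
 *
 *	// enable trap level tsbmiss handling for a large kpm page
 *	sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock, 1);
 *
 *	// set the mapped state of a small kpm page, get the old one
 *	oval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
 *	    &kpmsp->kshl_lock, KPM_MAPPEDS);
 */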
4956
4957#else /* lint */
4958
4959	.seg	".data"
4960sfmmu_kpm_tsbmtl_panic:
4961	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
4962	.byte	0
4963sfmmu_kpm_stsbmtl_panic:
4964	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
4965	.byte	0
4966	.align	4
4967	.seg	".text"
4968
4969	ENTRY_NP(sfmmu_kpm_tsbmtl)
4970	rdpr	%pstate, %o3
4971	/*
4972	 * %o0 = &kp_refcntc
4973	 * %o1 = &khl_lock
4974	 * %o2 = 0/1 (off/on)
4975	 * %o3 = pstate save
4976	 */
4977#ifdef DEBUG
4978	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4979	bnz,pt %icc, 1f				/* disabled, panic	 */
4980	  nop
4981	save	%sp, -SA(MINFRAME), %sp
4982	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
4983	call	panic
4984	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4985	ret
4986	restore
49871:
4988#endif /* DEBUG */
4989	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4990
4991	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4992	mov	-1, %o5
4993	brz,a	%o2, 2f
4994	  mov	0, %o5
49952:
4996	sth	%o5, [%o0]
4997	KPMLOCK_EXIT(%o1, ASI_N)
4998
4999	retl
5000	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
5001	SET_SIZE(sfmmu_kpm_tsbmtl)
5002
5003	ENTRY_NP(sfmmu_kpm_stsbmtl)
5004	rdpr	%pstate, %o3
5005	/*
5006	 * %o0 = &mapped
5007	 * %o1 = &kshl_lock
5008	 * %o2 = val
5009	 * %o3 = pstate save
5010	 */
5011#ifdef DEBUG
5012	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
5013	bnz,pt %icc, 1f				/* disabled, panic	 */
5014	  nop
5015	save	%sp, -SA(MINFRAME), %sp
5016	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
5017	call	panic
5018	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
5019	ret
5020	restore
50211:
5022#endif /* DEBUG */
5023	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
5024
5025	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
5026	ldsb	[%o0], %o5
5027	stb	%o2, [%o0]
5028	KPMLOCK_EXIT(%o1, ASI_N)
5029
5030	and	%o5, KPM_MAPPED_MASK, %o0	/* return old val */
5031	retl
5032	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
5033	SET_SIZE(sfmmu_kpm_stsbmtl)
5034
5035#endif /* lint */
5036
5037#ifndef lint
5038#ifdef sun4v
5039	/*
5040	 * User/kernel data miss w/ multiple TSBs
5041	 * The first probe covers the 8K, 64K, and 512K page sizes,
5042	 * because 64K and 512K mappings are replicated off the 8K
5043	 * pointer.  The second probe covers the 4M page size only.
5044	 *
5045	 * MMU fault area contains miss address and context.
5046	 */
5047	ALTENTRY(sfmmu_slow_dmmu_miss)
5048	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
5049
5050slow_miss_common:
5051	/*
5052	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
5053	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
5054	 */
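	/*
	 * In outline (a sketch; the *_entry() helpers are illustrative
	 * stand-ins for the GET_*TSBE_PTR macros):
	 *
	 *	if (ctx == 0) {		// kernel miss
	 *		tsbe1 = ktsb_entry(tagacc);	// patched base/sz
	 *		tsbe2 = ktsb4m_entry(tagacc);
	 *	} else {		// user miss
	 *		tsbe1 = utsb_entry(tagacc);
	 *		tsbe2 = have_2nd_tsb ? utsb4m_entry(tagacc) : NULL;
	 *	}
	 *	sfmmu_tsb_miss_tt(tsbe1, tsbe2, tagacc);
	 */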
5055	brnz,pt	%g3, 8f			! check for user context
5056	  nop
5057
5058	/*
5059	 * Kernel miss
5060	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
5061	 * branch to sfmmu_tsb_miss_tt to handle it.
5062	 */
5063	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
5064sfmmu_dslow_patch_ktsb_base:
5065	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
5066sfmmu_dslow_patch_ktsb_szcode:
5067	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
5068
5069	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
5070	! %g1 = First TSB entry pointer, as TSB miss handler expects
5071
5072	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
5073sfmmu_dslow_patch_ktsb4m_base:
5074	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
5075sfmmu_dslow_patch_ktsb4m_szcode:
5076	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
5077
5078	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
5079	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
5080	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
5081	.empty
5082
50838:
5084	/*
5085	 * User miss
5086	 * Get first TSB pointer in %g1
5087	 * Get second TSB pointer (or NULL if no second TSB) in %g3
5088	 * Branch to sfmmu_tsb_miss_tt to handle it
5089	 */
5090	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
5091	/* %g1 = first TSB entry ptr now, %g2 preserved */
5092
5093	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
5094	brlz,pt %g3, sfmmu_tsb_miss_tt		/* done if no 2nd TSB */
5095	  nop
5096
5097	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
5098	/* %g3 = second TSB entry ptr now, %g2 preserved */
50999:
5100	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
5101	.empty
5102	SET_SIZE(sfmmu_slow_dmmu_miss)
5103
5104
5105	/*
5106	 * User/kernel instruction miss w/ multiple TSBs
5107	 * The first probe covers the 8K, 64K, and 512K page sizes,
5108	 * because 64K and 512K mappings are replicated off the 8K
5109	 * pointer.  The second probe covers the 4M page size only.
5110	 *
5111	 * MMU fault area contains miss address and context.
5112	 */
5113	ALTENTRY(sfmmu_slow_immu_miss)
5114	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
5115	ba,a,pt	%xcc, slow_miss_common
5116	SET_SIZE(sfmmu_slow_immu_miss)
5117
5118#endif /* sun4v */
5119#endif	/* lint */
5120
5121#ifndef lint
5122
5123/*
5124 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
5125 */
5126	.seg	".data"
5127	.align	64
5128	.global tsbmiss_area
5129tsbmiss_area:
5130	.skip	(TSBMISS_SIZE * NCPU)
5131
5132	.align	64
5133	.global kpmtsbm_area
5134kpmtsbm_area:
5135	.skip	(KPMTSBM_SIZE * NCPU)
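/*
 * Each CPU indexes its slice directly, e.g. (a sketch):
 *
 *	struct tsbmiss *tm = (struct tsbmiss *)
 *	    ((uintptr_t)tsbmiss_area + CPU->cpu_id * TSBMISS_SIZE);
 */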
5136#endif	/* lint */
5137